author     Linus Torvalds <torvalds@linux-foundation.org>  2018-06-07 12:45:58 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-06-07 12:45:58 -0700
commit     3a3869f1c443383ef8354ffa0e5fb8df65d8b549 (patch)
tree       7972a4444f04a91f4baab5896df33d4e55d9909d /drivers
parent     3036bc45364f98515a2c446d7fac2c34dcfbeff4 (diff)
parent     488ad6d3678beee65bcd74e6a9764bd7cee9d3d3 (diff)
Merge tag 'pci-v4.18-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
Pull PCI updates from Bjorn Helgaas:

  - unify AER decoding for native and ACPI CPER sources (Alexandru
    Gagniuc)

  - add TLP header info to AER tracepoint (Thomas Tai)

  - add generic pcie_wait_for_link() interface (Oza Pawandeep)

  - handle AER ERR_FATAL by removing and re-enumerating devices, as
    Downstream Port Containment does (Oza Pawandeep)

  - factor out common code between AER and DPC recovery (Oza Pawandeep)

  - stop triggering DPC for ERR_NONFATAL errors (Oza Pawandeep)

  - share ERR_FATAL recovery path between AER and DPC (Oza Pawandeep)

  - disable ASPM L1.2 substate if we don't have LTR (Bjorn Helgaas)

  - respect platform ownership of LTR (Bjorn Helgaas)

  - clear interrupt status in top half to avoid interrupt storm (Oza
    Pawandeep)

  - neaten pci=earlydump output (Andy Shevchenko)

  - avoid errors when extended config space is inaccessible (Gilles Buloz)

  - prevent sysfs disable of device while driver attached (Christoph
    Hellwig)

  - use core interface to report PCIe link properties in bnx2x, bnxt_en,
    cxgb4, ixgbe (Bjorn Helgaas); a call-site sketch follows this list

  - remove unused pcie_get_minimum_link() (Bjorn Helgaas)

  - fix use-before-set error in ibmphp (Dan Carpenter)

  - fix pciehp timeouts caused by Command Completed errata (Bjorn
    Helgaas)

  - fix refcounting in pnv_php hotplug (Julia Lawall)

  - clear pciehp Presence Detect and Data Link Layer Status Changed on
    resume so we don't miss hotplug events (Mika Westerberg)

  - only request pciehp control if we support it, so platform can use
    ACPI hotplug otherwise (Mika Westerberg)

  - convert SHPC to be builtin only (Mika Westerberg)

  - request SHPC control via _OSC if we support it (Mika Westerberg)

  - simplify SHPC handoff from firmware (Mika Westerberg)

  - fix an SHPC quirk that mistakenly included *all* AMD bridges as well
    as devices from any vendor with device ID 0x7458 (Bjorn Helgaas)

  - assign a bus number even to non-native hotplug bridges to leave
    space for acpiphp additions, to fix a common Thunderbolt xHCI
    hot-add failure (Mika Westerberg)

  - keep acpiphp from scanning native hotplug bridges, to fix common
    Thunderbolt hot-add failures (Mika Westerberg)

  - improve "partially hidden behind bridge" messages from core (Mika
    Westerberg)

  - add macros for PCIe Link Control 2 register (Frederick Lawler); a short
    sketch using them follows this list

  - replace IB/hfi1 custom macros with PCI core versions (Frederick
    Lawler)

  - remove dead microblaze and xtensa code (Bjorn Helgaas)

  - use dev_printk() when possible in xtensa and mips (Bjorn Helgaas)

  - remove unused pcie_port_acpi_setup() and portdrv_acpi.c (Bjorn
    Helgaas)

  - add managed interface to get PCI host bridge resources from OF (Jan
    Kiszka); a probe sketch follows this list

  - add support for unbinding generic PCI host controller (Jan Kiszka)

  - fix memory leaks when unbinding generic PCI host controller (Jan
    Kiszka)

  - request legacy VGA framebuffer only for VGA devices to avoid false
    device conflicts (Bjorn Helgaas)

  - turn on PCI_COMMAND_IO & PCI_COMMAND_MEMORY in pci_enable_device()
    like everybody else, not in pcibios_fixup_bus() (Bjorn Helgaas)

  - add generic enable function for simple SR-IOV hardware (Alexander
    Duyck); a driver sketch follows this list

  - use generic SR-IOV enable for ena, nvme (Alexander Duyck)

  - add ACS quirk for Intel 7th & 8th Gen mobile (Alex Williamson)

  - add ACS quirk for Intel 300 series (Mika Westerberg)

  - enable register clock for Armada 7K/8K (Gregory CLEMENT)

  - reduce Keystone "link already up" log level (Fabio Estevam)

  - move private DT functions to drivers/pci/ (Rob Herring)

  - factor out dwc CONFIG_PCI Kconfig dependencies (Rob Herring)

  - add DesignWare support to the endpoint test driver (Gustavo
    Pimentel)

  - add DesignWare support for endpoint mode (Gustavo Pimentel)

  - use devm_ioremap_resource() instead of devm_ioremap() in dra7xx and
    artpec6 (Gustavo Pimentel)

  - fix Qualcomm bitwise NOT issue (Dan Carpenter)

  - add Qualcomm runtime PM support (Srinivas Kandagatla)

  - fix DesignWare enumeration below bridges (Koen Vandeputte)

  - use usleep() instead of mdelay() in endpoint test (Jia-Ju Bai)

  - add configfs entries for pci_epf_driver device IDs (Kishon Vijay
    Abraham I)

  - clean up pci_endpoint_test driver (Gustavo Pimentel)

  - update Layerscape maintainer email addresses (Minghuan Lian)

  - add COMPILE_TEST to improve build test coverage (Rob Herring)

  - fix Hyper-V bus registration failure caused by domain/serial number
    confusion (Sridhar Pitchai)

  - improve Hyper-V refcounting and coding style (Stephen Hemminger)

  - avoid potential Hyper-V hang waiting for a response that will never
    come (Dexuan Cui)

  - implement Mediatek chained IRQ handling (Honghui Zhang)

  - fix vendor ID & class type for Mediatek MT7622 (Honghui Zhang)

  - add Mobiveil PCIe host controller driver (Subrahmanya Lingappa)

  - add Mobiveil MSI support (Subrahmanya Lingappa)

  - clean up clocks, MSI, IRQ mappings in R-Car probe failure paths
    (Marek Vasut)

  - poll more frequently (5us vs 5ms) while waiting for R-Car data link
    active (Marek Vasut)

  - use generic OF parsing interface in R-Car (Vladimir Zapolskiy)

  - add R-Car V3H (R8A77980) "compatible" string (Sergei Shtylyov)

  - add R-Car gen3 PHY support (Sergei Shtylyov)

  - improve R-Car PHYRDY polling (Sergei Shtylyov)

  - clean up R-Car macros (Marek Vasut)

  - use runtime PM for R-Car controller clock (Dien Pham)

  - update arm64 defconfig for Rockchip (Shawn Lin)

  - refactor Rockchip code to facilitate both root port and endpoint
    mode (Shawn Lin)

  - add Rockchip endpoint mode driver (Shawn Lin)

  - support VMD "membar shadow" feature (Jon Derrick)

  - support VMD bus number offsets (Jon Derrick)

  - add VMD "no AER source ID" quirk for more device IDs (Jon Derrick)

  - remove unnecessary host controller CONFIG_PCIEPORTBUS Kconfig
    selections (Bjorn Helgaas)

  - clean up quirks.c organization and whitespace (Bjorn Helgaas)
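
  The bnx2x/bnxt_en/cxgb4/ixgbe conversions above replace per-driver link
  decoding with the core pcie_print_link_status() helper. Below is a minimal
  call-site sketch, not taken from any of those drivers; foo_probe() is a
  hypothetical probe routine.

      static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
      {
              int ret;

              ret = pcim_enable_device(pdev);
              if (ret)
                      return ret;

              /*
               * Logs the negotiated PCIe speed/width and points out when
               * the device is limited by a slower or narrower link upstream.
               */
              pcie_print_link_status(pdev);

              return 0;
      }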
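
  The Link Control 2 items above add PCI_EXP_LNKCTL2_TLS* definitions to the
  core PCI register header so hfi1 can drop its private copies (see the
  hfi1/pcie.c hunks below). A small sketch of clamping a device's Target Link
  Speed, using only macros that appear in those hunks;
  foo_cap_target_speed() is illustrative.

      static void foo_cap_target_speed(struct pci_dev *pdev)
      {
              u16 lnkctl2;

              /* Read Link Control 2 and cap the Target Link Speed at 8.0 GT/s */
              pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &lnkctl2);
              if ((lnkctl2 & PCI_EXP_LNKCTL2_TLS) > PCI_EXP_LNKCTL2_TLS_8_0GT) {
                      lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
                      lnkctl2 |= PCI_EXP_LNKCTL2_TLS_8_0GT;
                      pcie_capability_write_word(pdev, PCI_EXP_LNKCTL2, lnkctl2);
              }
      }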
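
  The managed OF interface above, devm_of_pci_get_host_bridge_resources(), is
  what pcie-designware-host.c switches to in the hunk further down; the parsed
  resource list is released automatically on unbind. A rough host-probe sketch
  under that assumption; foo_host_probe() and the elided setup steps are
  placeholders.

      static int foo_host_probe(struct platform_device *pdev)
      {
              struct device *dev = &pdev->dev;
              struct pci_host_bridge *bridge;
              resource_size_t iobase = 0;
              int ret;

              bridge = devm_pci_alloc_host_bridge(dev, 0);
              if (!bridge)
                      return -ENOMEM;

              /* Parse DT ranges into bridge->windows; freed when the driver unbinds */
              ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
                                                          &bridge->windows, &iobase);
              if (ret)
                      return ret;

              /* ... remap windows, set bridge->ops and sysdata, scan the bus ... */
              return 0;
      }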
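
  The simple SR-IOV enable above is pci_sriov_configure_simple(), which ena
  and nvme adopt in the diffs below; it only flips VFs on or off when
  sriov_numvfs is written, so it suits PFs that need no driver-side setup. A
  sketch of a driver wiring it up; the foo names and the 0x1234/0x5678 IDs
  are invented.

      #include <linux/module.h>
      #include <linux/pci.h>

      static const struct pci_device_id foo_ids[] = {
              { PCI_DEVICE(0x1234, 0x5678) },   /* placeholder vendor/device */
              { }
      };
      MODULE_DEVICE_TABLE(pci, foo_ids);

      static int foo_pf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
      {
              return pcim_enable_device(pdev);
      }

      static struct pci_driver foo_driver = {
              .name             = "foo-pf",
              .id_table         = foo_ids,
              .probe            = foo_pf_probe,
              /* Core helper: enables/disables VFs and reports the count back */
              .sriov_configure  = pci_sriov_configure_simple,
      };
      module_pci_driver(foo_driver);
      MODULE_LICENSE("GPL");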

* tag 'pci-v4.18-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (144 commits)
  PCI/AER: Replace struct pcie_device with pci_dev
  PCI/AER: Remove unused parameters
  PCI: qcom: Include gpio/consumer.h
  PCI: Improve "partially hidden behind bridge" log message
  PCI: Improve pci_scan_bridge() and pci_scan_bridge_extend() doc
  PCI: Move resource distribution for single bridge outside loop
  PCI: Account for all bridges on bus when distributing bus numbers
  ACPI / hotplug / PCI: Drop unnecessary parentheses
  ACPI / hotplug / PCI: Mark stale PCI devices disconnected
  ACPI / hotplug / PCI: Don't scan bridges managed by native hotplug
  PCI: hotplug: Add hotplug_is_native()
  PCI: shpchp: Add shpchp_is_native()
  PCI: shpchp: Fix AMD POGO identification
  PCI: mobiveil: Add MSI support
  PCI: mobiveil: Add Mobiveil PCIe Host Bridge IP driver
  PCI/AER: Decode Error Source Requester ID
  PCI/AER: Remove aer_recover_work_func() forward declaration
  PCI/DPC: Use the generic pcie_do_fatal_recovery() path
  PCI/AER: Pass service type to pcie_do_fatal_recovery()
  PCI/DPC: Disable ERR_NONFATAL handling by DPC
  ...
Diffstat (limited to 'drivers')
 drivers/acpi/pci_root.c | 17
 drivers/infiniband/hw/hfi1/pcie.c | 24
 drivers/iommu/amd_iommu.c | 11
 drivers/iommu/intel-iommu.c | 3
 drivers/misc/pci_endpoint_test.c | 29
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 28
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 23
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 19
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 75
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 47
 drivers/nvme/host/pci.c | 20
 drivers/pci/Kconfig | 12
 drivers/pci/Makefile | 1
 drivers/pci/ats.c | 3
 drivers/pci/dwc/Kconfig | 88
 drivers/pci/dwc/pci-dra7xx.c | 19
 drivers/pci/dwc/pci-imx6.c | 2
 drivers/pci/dwc/pci-keystone.c | 2
 drivers/pci/dwc/pcie-armada8k.c | 21
 drivers/pci/dwc/pcie-artpec6.c | 6
 drivers/pci/dwc/pcie-designware-ep.c | 19
 drivers/pci/dwc/pcie-designware-host.c | 80
 drivers/pci/dwc/pcie-designware-plat.c | 155
 drivers/pci/dwc/pcie-designware.c | 22
 drivers/pci/dwc/pcie-designware.h | 1
 drivers/pci/dwc/pcie-qcom.c | 13
 drivers/pci/endpoint/functions/pci-epf-test.c | 35
 drivers/pci/endpoint/pci-epf-core.c | 23
 drivers/pci/host/Kconfig | 55
 drivers/pci/host/Makefile | 2
 drivers/pci/host/pci-aardvark.c | 7
 drivers/pci/host/pci-ftpci100.c | 6
 drivers/pci/host/pci-host-common.c | 13
 drivers/pci/host/pci-host-generic.c | 1
 drivers/pci/host/pci-hyperv.c | 162
 drivers/pci/host/pci-mvebu.c | 2
 drivers/pci/host/pci-rcar-gen2.c | 2
 drivers/pci/host/pci-tegra.c | 2
 drivers/pci/host/pci-v3-semi.c | 5
 drivers/pci/host/pci-versatile.c | 5
 drivers/pci/host/pci-xgene.c | 5
 drivers/pci/host/pcie-altera.c | 7
 drivers/pci/host/pcie-iproc-platform.c | 5
 drivers/pci/host/pcie-mediatek.c | 236
 drivers/pci/host/pcie-mobiveil.c | 866
 drivers/pci/host/pcie-rcar.c | 284
 drivers/pci/host/pcie-rockchip-ep.c | 642
 drivers/pci/host/pcie-rockchip-host.c | 1142
 drivers/pci/host/pcie-rockchip.c | 1580
 drivers/pci/host/pcie-rockchip.h | 338
 drivers/pci/host/pcie-xilinx-nwl.c | 6
 drivers/pci/host/pcie-xilinx.c | 6
 drivers/pci/host/vmd.c | 91
 drivers/pci/hotplug/Kconfig | 5
 drivers/pci/hotplug/acpi_pcihp.c | 45
 drivers/pci/hotplug/acpiphp_glue.c | 84
 drivers/pci/hotplug/ibmphp_core.c | 2
 drivers/pci/hotplug/pciehp.h | 2
 drivers/pci/hotplug/pciehp_core.c | 2
 drivers/pci/hotplug/pciehp_hpc.c | 84
 drivers/pci/hotplug/pnv_php.c | 8
 drivers/pci/hotplug/shpchp.h | 12
 drivers/pci/hotplug/shpchp_core.c | 14
 drivers/pci/hotplug/shpchp_ctrl.c | 8
 drivers/pci/iov.c | 42
 drivers/pci/of.c | 63
 drivers/pci/pci-acpi.c | 55
 drivers/pci/pci-driver.c | 2
 drivers/pci/pci-pf-stub.c | 54
 drivers/pci/pci-sysfs.c | 15
 drivers/pci/pci.c | 89
 drivers/pci/pci.h | 45
 drivers/pci/pcie/Makefile | 2
 drivers/pci/pcie/aer/aerdrv.c | 11
 drivers/pci/pcie/aer/aerdrv.h | 32
 drivers/pci/pcie/aer/aerdrv_core.c | 397
 drivers/pci/pcie/aer/aerdrv_errprint.c | 38
 drivers/pci/pcie/aspm.c | 9
 drivers/pci/pcie/dpc.c | 74
 drivers/pci/pcie/err.c | 388
 drivers/pci/pcie/portdrv.h | 5
 drivers/pci/pcie/portdrv_acpi.c | 57
 drivers/pci/pcie/portdrv_core.c | 71
 drivers/pci/probe.c | 96
 drivers/pci/quirks.c | 1002
 drivers/pci/setup-bus.c | 82
 86 files changed, 5716 insertions(+), 3447 deletions(-)
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 0da18bde6a16..7433035ded95 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -153,6 +153,7 @@ static struct pci_osc_bit_struct pci_osc_control_bit[] = {
 	{ OSC_PCI_EXPRESS_PME_CONTROL, "PME" },
 	{ OSC_PCI_EXPRESS_AER_CONTROL, "AER" },
 	{ OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" },
+	{ OSC_PCI_EXPRESS_LTR_CONTROL, "LTR" },
 };
 
 static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
@@ -472,9 +473,17 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
 	}
 
 	control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
-		| OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
 		| OSC_PCI_EXPRESS_PME_CONTROL;
 
+	if (IS_ENABLED(CONFIG_PCIEASPM))
+		control |= OSC_PCI_EXPRESS_LTR_CONTROL;
+
+	if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+		control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
+
+	if (IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
+		control |= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
+
 	if (pci_aer_available()) {
 		if (aer_acpi_firmware_first())
 			dev_info(&device->dev,
@@ -900,11 +909,15 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
 
 	host_bridge = to_pci_host_bridge(bus->bridge);
 	if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
-		host_bridge->native_hotplug = 0;
+		host_bridge->native_pcie_hotplug = 0;
+	if (!(root->osc_control_set & OSC_PCI_SHPC_NATIVE_HP_CONTROL))
+		host_bridge->native_shpc_hotplug = 0;
 	if (!(root->osc_control_set & OSC_PCI_EXPRESS_AER_CONTROL))
 		host_bridge->native_aer = 0;
 	if (!(root->osc_control_set & OSC_PCI_EXPRESS_PME_CONTROL))
 		host_bridge->native_pme = 0;
+	if (!(root->osc_control_set & OSC_PCI_EXPRESS_LTR_CONTROL))
+		host_bridge->native_ltr = 0;
 
 	pci_scan_child_bus(bus);
 	pci_set_host_bridge_release(host_bridge, acpi_pci_root_release_info,
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index c1c982908b4b..bf601c7629fb 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -56,11 +56,6 @@
 #include "chip_registers.h"
 #include "aspm.h"
 
-/* link speed vector for Gen3 speed - not in Linux headers */
-#define GEN1_SPEED_VECTOR 0x1
-#define GEN2_SPEED_VECTOR 0x2
-#define GEN3_SPEED_VECTOR 0x3
-
 /*
  * This file contains PCIe utility routines.
  */
@@ -262,7 +257,7 @@ static u32 extract_speed(u16 linkstat)
 	case PCI_EXP_LNKSTA_CLS_5_0GB:
 		speed = 5000; /* Gen 2, 5GHz */
 		break;
-	case GEN3_SPEED_VECTOR:
+	case PCI_EXP_LNKSTA_CLS_8_0GB:
 		speed = 8000; /* Gen 3, 8GHz */
 		break;
 	}
@@ -317,7 +312,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
 		return ret;
 	}
 
-	if ((linkcap & PCI_EXP_LNKCAP_SLS) != GEN3_SPEED_VECTOR) {
+	if ((linkcap & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_8_0GB) {
 		dd_dev_info(dd,
 			    "This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
 			    linkcap & PCI_EXP_LNKCAP_SLS);
@@ -694,9 +689,6 @@ const struct pci_error_handlers hfi1_pci_err_handler = {
 /* gasket block secondary bus reset delay */
 #define SBR_DELAY_US 200000	/* 200ms */
 
-/* mask for PCIe capability register lnkctl2 target link speed */
-#define LNKCTL2_TARGET_LINK_SPEED_MASK 0xf
-
 static uint pcie_target = 3;
 module_param(pcie_target, uint, S_IRUGO);
 MODULE_PARM_DESC(pcie_target, "PCIe target speed (0 skip, 1-3 Gen1-3)");
@@ -1045,13 +1037,13 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
 		return 0;
 
 	if (pcie_target == 1) {			/* target Gen1 */
-		target_vector = GEN1_SPEED_VECTOR;
+		target_vector = PCI_EXP_LNKCTL2_TLS_2_5GT;
 		target_speed = 2500;
 	} else if (pcie_target == 2) {		/* target Gen2 */
-		target_vector = GEN2_SPEED_VECTOR;
+		target_vector = PCI_EXP_LNKCTL2_TLS_5_0GT;
 		target_speed = 5000;
 	} else if (pcie_target == 3) {		/* target Gen3 */
-		target_vector = GEN3_SPEED_VECTOR;
+		target_vector = PCI_EXP_LNKCTL2_TLS_8_0GT;
 		target_speed = 8000;
 	} else {
 		/* off or invalid target - skip */
@@ -1290,8 +1282,8 @@ retry:
 	dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
 		    (u32)lnkctl2);
 	/* only write to parent if target is not as high as ours */
-	if ((lnkctl2 & LNKCTL2_TARGET_LINK_SPEED_MASK) < target_vector) {
-		lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
+	if ((lnkctl2 & PCI_EXP_LNKCTL2_TLS) < target_vector) {
+		lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
 		lnkctl2 |= target_vector;
 		dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
 			    (u32)lnkctl2);
@@ -1316,7 +1308,7 @@ retry:
 
 	dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
 		    (u32)lnkctl2);
-	lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
+	lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
 	lnkctl2 |= target_vector;
 	dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
 		    (u32)lnkctl2);
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 8fb8c737fffe..d60c7dc62905 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -354,6 +354,9 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev)
 	};
 	int i, pos;
 
+	if (pci_ats_disabled())
+		return false;
+
 	for (i = 0; i < 3; ++i) {
 		pos = pci_find_ext_capability(pdev, caps[i]);
 		if (pos == 0)
@@ -3523,9 +3526,11 @@ int amd_iommu_device_info(struct pci_dev *pdev,
 
 	memset(info, 0, sizeof(*info));
 
-	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
-	if (pos)
-		info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+	if (!pci_ats_disabled()) {
+		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
+		if (pos)
+			info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+	}
 
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
 	if (pos)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 749d8f235346..772b404a6604 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2459,7 +2459,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 	if (dev && dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(info->dev);
 
-		if (ecap_dev_iotlb_support(iommu->ecap) &&
+		if (!pci_ats_disabled() &&
+		    ecap_dev_iotlb_support(iommu->ecap) &&
 		    pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
 		    dmar_find_matched_atsr_unit(pdev))
 			info->ats_supported = 1;
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index fe8897e64635..7b370466a227 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -203,7 +203,7 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
 	if (!val)
 		return false;
 
-	if (test->last_irq - pdev->irq == msi_num - 1)
+	if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
 		return true;
 
 	return false;
@@ -233,7 +233,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
 	orig_src_addr = dma_alloc_coherent(dev, size + alignment,
 					   &orig_src_phys_addr, GFP_KERNEL);
 	if (!orig_src_addr) {
-		dev_err(dev, "failed to allocate source buffer\n");
+		dev_err(dev, "Failed to allocate source buffer\n");
 		ret = false;
 		goto err;
 	}
@@ -259,7 +259,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
 	orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
 					   &orig_dst_phys_addr, GFP_KERNEL);
 	if (!orig_dst_addr) {
-		dev_err(dev, "failed to allocate destination address\n");
+		dev_err(dev, "Failed to allocate destination address\n");
 		ret = false;
 		goto err_orig_src_addr;
 	}
@@ -321,7 +321,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
 	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
 				       GFP_KERNEL);
 	if (!orig_addr) {
-		dev_err(dev, "failed to allocate address\n");
+		dev_err(dev, "Failed to allocate address\n");
 		ret = false;
 		goto err;
 	}
@@ -382,7 +382,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
 	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
 				       GFP_KERNEL);
 	if (!orig_addr) {
-		dev_err(dev, "failed to allocate destination address\n");
+		dev_err(dev, "Failed to allocate destination address\n");
 		ret = false;
 		goto err;
 	}
@@ -513,31 +513,31 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
 	if (!no_msi) {
 		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
 		if (irq < 0)
-			dev_err(dev, "failed to get MSI interrupts\n");
+			dev_err(dev, "Failed to get MSI interrupts\n");
 		test->num_irqs = irq;
 	}
 
 	err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
 			       IRQF_SHARED, DRV_MODULE_NAME, test);
 	if (err) {
-		dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
+		dev_err(dev, "Failed to request IRQ %d\n", pdev->irq);
 		goto err_disable_msi;
 	}
 
 	for (i = 1; i < irq; i++) {
-		err = devm_request_irq(dev, pdev->irq + i,
+		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
 				       pci_endpoint_test_irqhandler,
 				       IRQF_SHARED, DRV_MODULE_NAME, test);
 		if (err)
 			dev_err(dev, "failed to request IRQ %d for MSI %d\n",
-				pdev->irq + i, i + 1);
+				pci_irq_vector(pdev, i), i + 1);
 	}
 
 	for (bar = BAR_0; bar <= BAR_5; bar++) {
 		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
 			base = pci_ioremap_bar(pdev, bar);
 			if (!base) {
-				dev_err(dev, "failed to read BAR%d\n", bar);
+				dev_err(dev, "Failed to read BAR%d\n", bar);
 				WARN_ON(bar == test_reg_bar);
 			}
 			test->bar[bar] = base;
@@ -557,7 +557,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
 	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
 	if (id < 0) {
 		err = id;
-		dev_err(dev, "unable to get id\n");
+		dev_err(dev, "Unable to get id\n");
 		goto err_iounmap;
 	}
 
@@ -573,7 +573,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
 
 	err = misc_register(misc_device);
 	if (err) {
-		dev_err(dev, "failed to register device\n");
+		dev_err(dev, "Failed to register device\n");
 		goto err_kfree_name;
 	}
 
@@ -592,7 +592,7 @@ err_iounmap:
 	}
 
 	for (i = 0; i < irq; i++)
-		devm_free_irq(dev, pdev->irq + i, test);
+		devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
 
 err_disable_msi:
 	pci_disable_msi(pdev);
@@ -625,7 +625,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
 			pci_iounmap(pdev, test->bar[bar]);
 	}
 	for (i = 0; i < test->num_irqs; i++)
-		devm_free_irq(&pdev->dev, pdev->irq + i, test);
+		devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
 	pci_disable_msi(pdev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
@@ -634,6 +634,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
 static const struct pci_device_id pci_endpoint_test_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a822e70c2af3..f2af87d70594 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -3386,32 +3386,6 @@ err_disable_device:
 }
 
 /*****************************************************************************/
-static int ena_sriov_configure(struct pci_dev *dev, int numvfs)
-{
-	int rc;
-
-	if (numvfs > 0) {
-		rc = pci_enable_sriov(dev, numvfs);
-		if (rc != 0) {
-			dev_err(&dev->dev,
-				"pci_enable_sriov failed to enable: %d vfs with the error: %d\n",
-				numvfs, rc);
-			return rc;
-		}
-
-		return numvfs;
-	}
-
-	if (numvfs == 0) {
-		pci_disable_sriov(dev);
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-/*****************************************************************************/
-/*****************************************************************************/
 
 /* ena_remove - Device Removal Routine
  * @pdev: PCI device information struct
@@ -3526,7 +3500,7 @@ static struct pci_driver ena_pci_driver = {
 	.suspend    = ena_suspend,
 	.resume     = ena_resume,
 #endif
-	.sriov_configure = ena_sriov_configure,
+	.sriov_configure = pci_sriov_configure_simple,
 };
 
 static int __init ena_init(void)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c766ae23bc74..5b1ed240bf18 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13922,8 +13922,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 {
 	struct net_device *dev = NULL;
 	struct bnx2x *bp;
-	enum pcie_link_width pcie_width;
-	enum pci_bus_speed pcie_speed;
 	int rc, max_non_def_sbs;
 	int rx_count, tx_count, rss_count, doorbell_size;
 	int max_cos_est;
@@ -14091,21 +14089,12 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
 		rtnl_unlock();
 	}
-	if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
-	    pcie_speed == PCI_SPEED_UNKNOWN ||
-	    pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
-		BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
-	else
-		BNX2X_DEV_INFO(
-		       "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
-		       board_info[ent->driver_data].name,
-		       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
-		       pcie_width,
-		       pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
-		       pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
-		       pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
-		       "Unknown",
-		       dev->base_addr, bp->pdev->irq, dev->dev_addr);
+	BNX2X_DEV_INFO(
+	       "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n",
+	       board_info[ent->driver_data].name,
+	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+	       dev->base_addr, bp->pdev->irq, dev->dev_addr);
+	pcie_print_link_status(bp->pdev);
 
 	bnx2x_register_phc(bp);
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index dfa0839f6656..176fc9f4d7de 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -8685,22 +8685,6 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
 	return rc;
 }
 
-static void bnxt_parse_log_pcie_link(struct bnxt *bp)
-{
-	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
-	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
-
-	if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
-	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
-		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
-	else
-		netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
-			    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
-			    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
-			    speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
-			    "Unknown", width);
-}
-
 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int version_printed;
@@ -8915,8 +8899,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
 		    board_info[ent->driver_data].name,
 		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
-
-	bnxt_parse_log_pcie_link(bp);
+	pcie_print_link_status(pdev);
 
 	return 0;
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0efae2030e71..35cb3ae4f7b6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5066,79 +5066,6 @@ static int init_rss(struct adapter *adap)
 	return 0;
 }
 
-static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
-					enum pci_bus_speed *speed,
-					enum pcie_link_width *width)
-{
-	u32 lnkcap1, lnkcap2;
-	int err1, err2;
-
-#define  PCIE_MLW_CAP_SHIFT 4   /* start of MLW mask in link capabilities */
-
-	*speed = PCI_SPEED_UNKNOWN;
-	*width = PCIE_LNK_WIDTH_UNKNOWN;
-
-	err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
-					  &lnkcap1);
-	err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
-					  &lnkcap2);
-	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
-		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
-			*speed = PCIE_SPEED_8_0GT;
-		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
-			*speed = PCIE_SPEED_5_0GT;
-		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
-			*speed = PCIE_SPEED_2_5GT;
-	}
-	if (!err1) {
-		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
-		if (!lnkcap2) { /* pre-r3.0 */
-			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
-				*speed = PCIE_SPEED_5_0GT;
-			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
-				*speed = PCIE_SPEED_2_5GT;
-		}
-	}
-
-	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
-		return err1 ? err1 : err2 ? err2 : -EINVAL;
-	return 0;
-}
-
-static void cxgb4_check_pcie_caps(struct adapter *adap)
-{
-	enum pcie_link_width width, width_cap;
-	enum pci_bus_speed speed, speed_cap;
-
-#define PCIE_SPEED_STR(speed) \
-	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
-	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
-	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
-	 "Unknown")
-
-	if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
-		dev_warn(adap->pdev_dev,
-			 "Unable to determine PCIe device BW capabilities\n");
-		return;
-	}
-
-	if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
-	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
-		dev_warn(adap->pdev_dev,
-			 "Unable to determine PCI Express bandwidth.\n");
-		return;
-	}
-
-	dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
-		 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
-	dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
-		 width, width_cap);
-	if (speed < speed_cap || width < width_cap)
-		dev_info(adap->pdev_dev,
-			 "A slot with more lanes and/or higher speed is "
-			 "suggested for optimal performance.\n");
-}
-
 /* Dump basic information about the adapter */
 static void print_adapter_info(struct adapter *adapter)
 {
@@ -5798,7 +5725,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	/* check for PCI Express bandwidth capabiltites */
-	cxgb4_check_pcie_caps(adapter);
+	pcie_print_link_status(pdev);
 
 	err = init_rss(adapter);
 	if (err)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 38b4e4899490..4929f7265598 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -245,9 +245,6 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
 				     int expected_gts)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int max_gts = 0;
-	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
-	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
 	struct pci_dev *pdev;
 
 	/* Some devices are not connected over PCIe and thus do not negotiate
@@ -263,49 +260,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
 	else
 		pdev = adapter->pdev;
 
-	if (pcie_get_minimum_link(pdev, &speed, &width) ||
-	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
-		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
-		return;
-	}
-
-	switch (speed) {
-	case PCIE_SPEED_2_5GT:
-		/* 8b/10b encoding reduces max throughput by 20% */
-		max_gts = 2 * width;
-		break;
-	case PCIE_SPEED_5_0GT:
-		/* 8b/10b encoding reduces max throughput by 20% */
-		max_gts = 4 * width;
-		break;
-	case PCIE_SPEED_8_0GT:
-		/* 128b/130b encoding reduces throughput by less than 2% */
-		max_gts = 8 * width;
-		break;
-	default:
-		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
-		return;
-	}
-
-	e_dev_info("PCI Express bandwidth of %dGT/s available\n",
-		   max_gts);
-	e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
-		   (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
-		    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
-		    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
-		    "Unknown"),
-		   width,
-		   (speed == PCIE_SPEED_2_5GT ? "20%" :
-		    speed == PCIE_SPEED_5_0GT ? "20%" :
-		    speed == PCIE_SPEED_8_0GT ? "<2%" :
-		    "Unknown"));
-
-	if (max_gts < expected_gts) {
-		e_dev_warn("This is not sufficient for optimal performance of this card.\n");
-		e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
-			expected_gts);
-		e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
-	}
+	pcie_print_link_status(pdev);
 }
 
 static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e526437bacbf..d234de5505ea 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2630,24 +2630,6 @@ static void nvme_remove(struct pci_dev *pdev)
 	nvme_put_ctrl(&dev->ctrl);
 }
 
-static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
-{
-	int ret = 0;
-
-	if (numvfs == 0) {
-		if (pci_vfs_assigned(pdev)) {
-			dev_warn(&pdev->dev,
-				"Cannot disable SR-IOV VFs while assigned\n");
-			return -EPERM;
-		}
-		pci_disable_sriov(pdev);
-		return 0;
-	}
-
-	ret = pci_enable_sriov(pdev, numvfs);
-	return ret ? ret : numvfs;
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int nvme_suspend(struct device *dev)
 {
@@ -2774,7 +2756,7 @@ static struct pci_driver nvme_driver = {
 	.driver		= {
 		.pm	= &nvme_dev_pm_ops,
 	},
-	.sriov_configure = nvme_pci_sriov_configure,
+	.sriov_configure = pci_sriov_configure_simple,
 	.err_handler	= &nvme_err_handler,
 };
 
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 29a487f31dae..b2f07635e94d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -67,6 +67,18 @@ config PCI_STUB
 
 	  When in doubt, say N.
 
+config PCI_PF_STUB
+	tristate "PCI PF Stub driver"
+	depends on PCI
+	depends on PCI_IOV
+	help
+	  Say Y or M here if you want to enable support for devices that
+	  require SR-IOV support, while at the same time the PF itself is
+	  not providing any actual services on the host itself such as
+	  storage or networking.
+
+	  When in doubt, say N.
+
 config XEN_PCIDEV_FRONTEND
         tristate "Xen PCI Frontend"
         depends on PCI && X86 && XEN
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 952addc7bacf..84c9eef6b1c3 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_PCI_LABEL)		+= pci-label.o
 obj-$(CONFIG_X86_INTEL_MID)	+= pci-mid.o
 obj-$(CONFIG_PCI_SYSCALL)	+= syscall.o
 obj-$(CONFIG_PCI_STUB)		+= pci-stub.o
+obj-$(CONFIG_PCI_PF_STUB)	+= pci-pf-stub.o
 obj-$(CONFIG_PCI_ECAM)		+= ecam.o
 obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
 
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 89305b569d3d..4923a2a8e14b 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -20,6 +20,9 @@ void pci_ats_init(struct pci_dev *dev)
 {
 	int pos;
 
+	if (pci_ats_disabled())
+		return;
+
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
 	if (!pos)
 		return;
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index 2f3f5c50aa48..16f52c626b4b 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -1,13 +1,13 @@
 # SPDX-License-Identifier: GPL-2.0
 
 menu "DesignWare PCI Core Support"
+	depends on PCI
 
 config PCIE_DW
 	bool
 
 config PCIE_DW_HOST
         bool
-	depends on PCI
 	depends on PCI_MSI_IRQ_DOMAIN
         select PCIE_DW
 
@@ -22,7 +22,7 @@ config PCI_DRA7XX
 config PCI_DRA7XX_HOST
 	bool "TI DRA7xx PCIe controller Host Mode"
 	depends on SOC_DRA7XX || COMPILE_TEST
-	depends on PCI && PCI_MSI_IRQ_DOMAIN
+	depends on PCI_MSI_IRQ_DOMAIN
 	depends on OF && HAS_IOMEM && TI_PIPE3
 	select PCIE_DW_HOST
 	select PCI_DRA7XX
@@ -51,50 +51,62 @@ config PCI_DRA7XX_EP
 	  This uses the DesignWare core.
 
 config PCIE_DW_PLAT
-	bool "Platform bus based DesignWare PCIe Controller"
-	depends on PCI
-	depends on PCI_MSI_IRQ_DOMAIN
-	select PCIE_DW_HOST
-	---help---
-	 This selects the DesignWare PCIe controller support. Select this if
-	 you have a PCIe controller on Platform bus.
+	bool
 
-	 If you have a controller with this interface, say Y or M here.
+config PCIE_DW_PLAT_HOST
+	bool "Platform bus based DesignWare PCIe Controller - Host mode"
+	depends on PCI && PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PCIE_DW_PLAT
+	default y
+	help
+	  Enables support for the PCIe controller in the Designware IP to
+	  work in host mode. There are two instances of PCIe controller in
+	  Designware IP.
+	  This controller can work either as EP or RC. In order to enable
+	  host-specific features PCIE_DW_PLAT_HOST must be selected and in
+	  order to enable device-specific features PCI_DW_PLAT_EP must be
+	  selected.
 
-	 If unsure, say N.
+config PCIE_DW_PLAT_EP
+	bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
+	depends on PCI && PCI_MSI_IRQ_DOMAIN
+	depends on PCI_ENDPOINT
+	select PCIE_DW_EP
+	select PCIE_DW_PLAT
+	help
+	  Enables support for the PCIe controller in the Designware IP to
+	  work in endpoint mode. There are two instances of PCIe controller
+	  in Designware IP.
+	  This controller can work either as EP or RC. In order to enable
+	  host-specific features PCIE_DW_PLAT_HOST must be selected and in
+	  order to enable device-specific features PCI_DW_PLAT_EP must be
+	  selected.
 
 config PCI_EXYNOS
 	bool "Samsung Exynos PCIe controller"
-	depends on PCI
-	depends on SOC_EXYNOS5440
+	depends on SOC_EXYNOS5440 || COMPILE_TEST
 	depends on PCI_MSI_IRQ_DOMAIN
-	select PCIEPORTBUS
 	select PCIE_DW_HOST
 
 config PCI_IMX6
 	bool "Freescale i.MX6 PCIe controller"
-	depends on PCI
-	depends on SOC_IMX6Q
+	depends on SOC_IMX6Q || (ARM && COMPILE_TEST)
 	depends on PCI_MSI_IRQ_DOMAIN
-	select PCIEPORTBUS
 	select PCIE_DW_HOST
 
 config PCIE_SPEAR13XX
 	bool "STMicroelectronics SPEAr PCIe controller"
-	depends on PCI
-	depends on ARCH_SPEAR13XX
+	depends on ARCH_SPEAR13XX || COMPILE_TEST
 	depends on PCI_MSI_IRQ_DOMAIN
-	select PCIEPORTBUS
 	select PCIE_DW_HOST
 	help
 	  Say Y here if you want PCIe support on SPEAr13XX SoCs.
 
 config PCI_KEYSTONE
 	bool "TI Keystone PCIe controller"
-	depends on PCI
-	depends on ARCH_KEYSTONE
+	depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST)
 	depends on PCI_MSI_IRQ_DOMAIN
-	select PCIEPORTBUS
 	select PCIE_DW_HOST
 	help
 	  Say Y here if you want to enable PCI controller support on Keystone
@@ -104,8 +116,7 @@ config PCI_KEYSTONE
 
 config PCI_LAYERSCAPE
 	bool "Freescale Layerscape PCIe controller"
-	depends on PCI
-	depends on OF && (ARM || ARCH_LAYERSCAPE)
+	depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
 	depends on PCI_MSI_IRQ_DOMAIN
 	select MFD_SYSCON
 	select PCIE_DW_HOST
@@ -113,11 +124,9 @@ config PCI_LAYERSCAPE
 	  Say Y here if you want PCIe controller support on Layerscape SoCs.
 
 config PCI_HISI
-	depends on OF && ARM64
+	depends on OF && (ARM64 || COMPILE_TEST)
 	bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
-	depends on PCI
 	depends on PCI_MSI_IRQ_DOMAIN
-	select PCIEPORTBUS
 	select PCIE_DW_HOST
 	select PCI_HOST_COMMON
 	help
@@ -126,10 +135,8 @@ config PCI_HISI
 
 config PCIE_QCOM
 	bool "Qualcomm PCIe controller"
-	depends on PCI
-	depends on ARCH_QCOM && OF
+	depends on OF && (ARCH_QCOM || COMPILE_TEST)
 	depends on PCI_MSI_IRQ_DOMAIN
-	select PCIEPORTBUS
 	select PCIE_DW_HOST
 	help
 	  Say Y here to enable PCIe controller support on Qualcomm SoCs. The
@@ -138,10 +145,8 @@ config PCIE_QCOM
 
 config PCIE_ARMADA_8K
 	bool "Marvell Armada-8K PCIe controller"
-	depends on PCI
-	depends on ARCH_MVEBU
+	depends on ARCH_MVEBU || COMPILE_TEST
 	depends on PCI_MSI_IRQ_DOMAIN
-	select PCIEPORTBUS
 	select PCIE_DW_HOST
 	help
 	  Say Y here if you want to enable PCIe controller support on
@@ -154,9 +159,8 @@ config PCIE_ARTPEC6
 
 config PCIE_ARTPEC6_HOST
 	bool "Axis ARTPEC-6 PCIe controller Host Mode"
-	depends on MACH_ARTPEC6
-	depends on PCI && PCI_MSI_IRQ_DOMAIN
-	select PCIEPORTBUS
+	depends on MACH_ARTPEC6 || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
 	select PCIE_DW_HOST
 	select PCIE_ARTPEC6
 	help
@@ -165,7 +169,7 @@ config PCIE_ARTPEC6_HOST
 
 config PCIE_ARTPEC6_EP
 	bool "Axis ARTPEC-6 PCIe controller Endpoint Mode"
-	depends on MACH_ARTPEC6
+	depends on MACH_ARTPEC6 || COMPILE_TEST
 	depends on PCI_ENDPOINT
 	select PCIE_DW_EP
 	select PCIE_ARTPEC6
@@ -174,11 +178,9 @@ config PCIE_ARTPEC6_EP
 	  endpoint mode. This uses the DesignWare core.
 
 config PCIE_KIRIN
-	depends on OF && ARM64
+	depends on OF && (ARM64 || COMPILE_TEST)
 	bool "HiSilicon Kirin series SoCs PCIe controllers"
 	depends on PCI_MSI_IRQ_DOMAIN
-	depends on PCI
-	select PCIEPORTBUS
 	select PCIE_DW_HOST
 	help
 	  Say Y here if you want PCIe controller support
@@ -186,10 +188,8 @@ config PCIE_KIRIN
 
 config PCIE_HISI_STB
 	bool "HiSilicon STB SoCs PCIe controllers"
-	depends on ARCH_HISI
-	depends on PCI
+	depends on ARCH_HISI || COMPILE_TEST
 	depends on PCI_MSI_IRQ_DOMAIN
-	select PCIEPORTBUS
 	select PCIE_DW_HOST
 	help
           Say Y here if you want PCIe controller support on HiSilicon STB SoCs
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index ed8558d638e5..f688204e50c5 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -27,6 +27,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 
+#include "../pci.h"
 #include "pcie-designware.h"
 
 /* PCIe controller wrapper DRA7XX configuration registers */
@@ -406,14 +407,14 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
 	ep->ops = &pcie_ep_ops;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
-	pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
-	if (!pci->dbi_base)
-		return -ENOMEM;
+	pci->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
-	pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
-	if (!pci->dbi_base2)
-		return -ENOMEM;
+	pci->dbi_base2 = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base2))
+		return PTR_ERR(pci->dbi_base2);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
 	if (!res)
@@ -459,9 +460,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
 		return ret;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
-	pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
-	if (!pci->dbi_base)
-		return -ENOMEM;
+	pci->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
 
 	pp->ops = &dra7xx_pcie_host_ops;
 
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index 4818ef875f8a..80f604602783 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -338,7 +338,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
 		break;
-	case IMX6QP: 		/* FALLTHROUGH */
+	case IMX6QP:		/* FALLTHROUGH */
 	case IMX6Q:
 		/* power up core phy and enable ref clock */
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index d55ae0716adf..3722a5f31e5e 100644
--- a/drivers/pci/dwc/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -89,7 +89,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
 	dw_pcie_setup_rc(pp);
 
 	if (dw_pcie_link_up(pci)) {
-		dev_err(dev, "Link already up\n");
+		dev_info(dev, "Link already up\n");
 		return 0;
 	}
 
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index b587352f8b9f..072fd7ecc29f 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -28,6 +28,7 @@
 struct armada8k_pcie {
 	struct dw_pcie *pci;
 	struct clk *clk;
+	struct clk *clk_reg;
 };
 
 #define PCIE_VENDOR_REGS_OFFSET		0x8000
@@ -229,26 +230,38 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	pcie->clk_reg = devm_clk_get(dev, "reg");
+	if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) {
+		ret = -EPROBE_DEFER;
+		goto fail;
+	}
+	if (!IS_ERR(pcie->clk_reg)) {
+		ret = clk_prepare_enable(pcie->clk_reg);
+		if (ret)
+			goto fail_clkreg;
+	}
+
 	/* Get the dw-pcie unit configuration/control registers base. */
 	base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
 	pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
 	if (IS_ERR(pci->dbi_base)) {
 		dev_err(dev, "couldn't remap regs base %p\n", base);
 		ret = PTR_ERR(pci->dbi_base);
-		goto fail;
+		goto fail_clkreg;
 	}
 
 	platform_set_drvdata(pdev, pcie);
 
 	ret = armada8k_add_pcie_port(pcie, pdev);
 	if (ret)
-		goto fail;
+		goto fail_clkreg;
 
 	return 0;
 
+fail_clkreg:
+	clk_disable_unprepare(pcie->clk_reg);
 fail:
-	if (!IS_ERR(pcie->clk))
-		clk_disable_unprepare(pcie->clk);
+	clk_disable_unprepare(pcie->clk);
 
 	return ret;
 }
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index e66cede2b5b7..321b56cfd5d0 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -463,9 +463,9 @@ static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
 	ep->ops = &pcie_ep_ops;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
-	pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
-	if (!pci->dbi_base2)
-		return -ENOMEM;
+	pci->dbi_base2 = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base2))
+		return PTR_ERR(pci->dbi_base2);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
 	if (!res)
diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c
index f07678bf7cfc..1eec4415a77f 100644
--- a/drivers/pci/dwc/pcie-designware-ep.c
+++ b/drivers/pci/dwc/pcie-designware-ep.c
@@ -75,7 +75,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
 
 	free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
 	if (free_win >= ep->num_ib_windows) {
-		dev_err(pci->dev, "no free inbound window\n");
+		dev_err(pci->dev, "No free inbound window\n");
 		return -EINVAL;
 	}
 
@@ -100,7 +100,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
 
 	free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
 	if (free_win >= ep->num_ob_windows) {
-		dev_err(pci->dev, "no free outbound window\n");
+		dev_err(pci->dev, "No free outbound window\n");
 		return -EINVAL;
 	}
 
@@ -204,7 +204,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
 
 	ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
 	if (ret) {
-		dev_err(pci->dev, "failed to enable address\n");
+		dev_err(pci->dev, "Failed to enable address\n");
 		return ret;
 	}
 
@@ -348,21 +348,21 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 
 	ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
 	if (ret < 0) {
-		dev_err(dev, "unable to read *num-ib-windows* property\n");
+		dev_err(dev, "Unable to read *num-ib-windows* property\n");
 		return ret;
 	}
 	if (ep->num_ib_windows > MAX_IATU_IN) {
-		dev_err(dev, "invalid *num-ib-windows*\n");
+		dev_err(dev, "Invalid *num-ib-windows*\n");
 		return -EINVAL;
 	}
 
 	ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
 	if (ret < 0) {
-		dev_err(dev, "unable to read *num-ob-windows* property\n");
+		dev_err(dev, "Unable to read *num-ob-windows* property\n");
 		return ret;
 	}
 	if (ep->num_ob_windows > MAX_IATU_OUT) {
-		dev_err(dev, "invalid *num-ob-windows*\n");
+		dev_err(dev, "Invalid *num-ob-windows*\n");
 		return -EINVAL;
 	}
 
@@ -389,7 +389,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 
 	epc = devm_pci_epc_create(dev, &epc_ops);
 	if (IS_ERR(epc)) {
-		dev_err(dev, "failed to create epc device\n");
+		dev_err(dev, "Failed to create epc device\n");
 		return PTR_ERR(epc);
 	}
 
@@ -411,6 +411,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 		return -ENOMEM;
 	}
 
+	epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER;
+	EPC_FEATURE_SET_BAR(epc->features, BAR_0);
+
 	ep->epc = epc;
 	epc_set_drvdata(epc, ep);
 	dw_pcie_setup(pci);
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 6c409079d514..cba1432e395d 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -15,6 +15,7 @@
 #include <linux/pci_regs.h>
 #include <linux/platform_device.h>
 
+#include "../pci.h"
 #include "pcie-designware.h"
 
 static struct pci_ops dw_pcie_ops;
@@ -83,18 +84,23 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
 	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
 
 	for (i = 0; i < num_ctrls; i++) {
-		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
-				    &val);
+		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
+					(i * MSI_REG_CTRL_BLOCK_SIZE),
+				    4, &val);
 		if (!val)
 			continue;
 
 		ret = IRQ_HANDLED;
 		pos = 0;
-		while ((pos = find_next_bit((unsigned long *) &val, 32,
-					    pos)) != 32) {
-			irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
+		while ((pos = find_next_bit((unsigned long *) &val,
+					    MAX_MSI_IRQS_PER_CTRL,
+					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
+			irq = irq_find_mapping(pp->irq_domain,
+					       (i * MAX_MSI_IRQS_PER_CTRL) +
+					       pos);
 			generic_handle_irq(irq);
-			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
+			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
+						(i * MSI_REG_CTRL_BLOCK_SIZE),
 					    4, 1 << pos);
 			pos++;
 		}
@@ -157,9 +163,9 @@ static void dw_pci_bottom_mask(struct irq_data *data)
 	if (pp->ops->msi_clear_irq) {
 		pp->ops->msi_clear_irq(pp, data->hwirq);
 	} else {
-		ctrl = data->hwirq / 32;
-		res = ctrl * 12;
-		bit = data->hwirq % 32;
+		ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
+		res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
 
 		pp->irq_status[ctrl] &= ~(1 << bit);
 		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
@@ -180,9 +186,9 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
 	if (pp->ops->msi_set_irq) {
 		pp->ops->msi_set_irq(pp, data->hwirq);
 	} else {
-		ctrl = data->hwirq / 32;
-		res = ctrl * 12;
-		bit = data->hwirq % 32;
+		ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
+		res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
 
 		pp->irq_status[ctrl] |= 1 << bit;
 		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
@@ -248,8 +254,10 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain,
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&pp->lock, flags);
+
 	bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
 			      order_base_2(nr_irqs));
+
 	raw_spin_unlock_irqrestore(&pp->lock, flags);
 }
 
@@ -266,7 +274,7 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
 	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
 					       &dw_pcie_msi_domain_ops, pp);
 	if (!pp->irq_domain) {
-		dev_err(pci->dev, "failed to create IRQ domain\n");
+		dev_err(pci->dev, "Failed to create IRQ domain\n");
 		return -ENOMEM;
 	}
 
@@ -274,7 +282,7 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
 						   &dw_pcie_msi_domain_info,
 						   pp->irq_domain);
 	if (!pp->msi_domain) {
-		dev_err(pci->dev, "failed to create MSI domain\n");
+		dev_err(pci->dev, "Failed to create MSI domain\n");
 		irq_domain_remove(pp->irq_domain);
 		return -ENOMEM;
 	}
@@ -301,13 +309,13 @@ void dw_pcie_msi_init(struct pcie_port *pp)
 	page = alloc_page(GFP_KERNEL);
 	pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 	if (dma_mapping_error(dev, pp->msi_data)) {
-		dev_err(dev, "failed to map MSI data\n");
+		dev_err(dev, "Failed to map MSI data\n");
 		__free_page(page);
 		return;
 	}
 	msi_target = (u64)pp->msi_data;
 
-	/* program the msi_data */
+	/* Program the msi_data */
 	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
 			    lower_32_bits(msi_target));
 	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
@@ -330,19 +338,19 @@ int dw_pcie_host_init(struct pcie_port *pp)
 
 	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
 	if (cfg_res) {
-		pp->cfg0_size = resource_size(cfg_res) / 2;
-		pp->cfg1_size = resource_size(cfg_res) / 2;
+		pp->cfg0_size = resource_size(cfg_res) >> 1;
+		pp->cfg1_size = resource_size(cfg_res) >> 1;
 		pp->cfg0_base = cfg_res->start;
 		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
 	} else if (!pp->va_cfg0_base) {
-		dev_err(dev, "missing *config* reg space\n");
+		dev_err(dev, "Missing *config* reg space\n");
 	}
 
 	bridge = pci_alloc_host_bridge(0);
 	if (!bridge)
 		return -ENOMEM;
 
-	ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
+	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
 					&bridge->windows, &pp->io_base);
 	if (ret)
 		return ret;
@@ -357,7 +365,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
 		case IORESOURCE_IO:
 			ret = pci_remap_iospace(win->res, pp->io_base);
 			if (ret) {
-				dev_warn(dev, "error %d: failed to map resource %pR\n",
+				dev_warn(dev, "Error %d: failed to map resource %pR\n",
 					 ret, win->res);
 				resource_list_destroy_entry(win);
 			} else {
@@ -375,8 +383,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
 			break;
 		case 0:
 			pp->cfg = win->res;
-			pp->cfg0_size = resource_size(pp->cfg) / 2;
-			pp->cfg1_size = resource_size(pp->cfg) / 2;
+			pp->cfg0_size = resource_size(pp->cfg) >> 1;
+			pp->cfg1_size = resource_size(pp->cfg) >> 1;
 			pp->cfg0_base = pp->cfg->start;
 			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
 			break;
@@ -391,7 +399,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
 						pp->cfg->start,
 						resource_size(pp->cfg));
 		if (!pci->dbi_base) {
-			dev_err(dev, "error with ioremap\n");
+			dev_err(dev, "Error with ioremap\n");
 			ret = -ENOMEM;
 			goto error;
 		}
@@ -403,7 +411,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
 		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
 					pp->cfg0_base, pp->cfg0_size);
 		if (!pp->va_cfg0_base) {
-			dev_err(dev, "error with ioremap in function\n");
+			dev_err(dev, "Error with ioremap in function\n");
 			ret = -ENOMEM;
 			goto error;
 		}
@@ -414,7 +422,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
 						pp->cfg1_base,
 						pp->cfg1_size);
 		if (!pp->va_cfg1_base) {
-			dev_err(dev, "error with ioremap\n");
+			dev_err(dev, "Error with ioremap\n");
 			ret = -ENOMEM;
 			goto error;
 		}
@@ -586,7 +594,7 @@ static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
 			return 0;
 	}
 
-	/* access only one slot on each root port */
+	/* Access only one slot on each root port */
 	if (bus->number == pp->root_bus_nr && dev > 0)
 		return 0;
 
@@ -650,13 +658,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 
 	/* Initialize IRQ Status array */
 	for (ctrl = 0; ctrl < num_ctrls; ctrl++)
-		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + (ctrl * 12), 4,
-				    &pp->irq_status[ctrl]);
-	/* setup RC BARs */
+		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+					(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+				    4, &pp->irq_status[ctrl]);
+
+	/* Setup RC BARs */
 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
 
-	/* setup interrupt pins */
+	/* Setup interrupt pins */
 	dw_pcie_dbi_ro_wr_en(pci);
 	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
 	val &= 0xffff00ff;
@@ -664,13 +674,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
 	dw_pcie_dbi_ro_wr_dis(pci);
 
-	/* setup bus numbers */
+	/* Setup bus numbers */
 	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
 	val &= 0xff000000;
 	val |= 0x00ff0100;
 	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
 
-	/* setup command register */
+	/* Setup command register */
 	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
 	val &= 0xffff0000;
 	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
@@ -683,7 +693,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 	 * we should not program the ATU here.
 	 */
 	if (!pp->ops->rd_other_conf) {
-		/* get iATU unroll support */
+		/* Get iATU unroll support */
 		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
 		dev_dbg(pci->dev, "iATU unroll: %s\n",
 			pci->iatu_unroll_enabled ? "enabled" : "disabled");
@@ -701,7 +711,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 
 	/* Enable write permission for the DBI read-only register */
 	dw_pcie_dbi_ro_wr_en(pci);
-	/* program correct class for RC */
+	/* Program correct class for RC */
 	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
 	/* Better disable write permission right after the update */
 	dw_pcie_dbi_ro_wr_dis(pci);
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index 5416aa8a07a5..5937fed4c938 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -12,19 +12,29 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/resource.h>
 #include <linux/signal.h>
 #include <linux/types.h>
+#include <linux/regmap.h>
 
 #include "pcie-designware.h"
 
 struct dw_plat_pcie {
-	struct dw_pcie		*pci;
+	struct dw_pcie			*pci;
+	struct regmap			*regmap;
+	enum dw_pcie_device_mode	mode;
 };
 
+struct dw_plat_pcie_of_data {
+	enum dw_pcie_device_mode	mode;
+};
+
+static const struct of_device_id dw_plat_pcie_of_match[];
+
 static int dw_plat_pcie_host_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -38,13 +48,63 @@ static int dw_plat_pcie_host_init(struct pcie_port *pp)
 	return 0;
 }
 
+static void dw_plat_set_num_vectors(struct pcie_port *pp)
+{
+	pp->num_vectors = MAX_MSI_IRQS;
+}
+
 static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
 	.host_init = dw_plat_pcie_host_init,
+	.set_num_vectors = dw_plat_set_num_vectors,
+};
+
+static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
+{
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.start_link = dw_plat_pcie_establish_link,
 };
 
-static int dw_plat_add_pcie_port(struct pcie_port *pp,
+static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	enum pci_barno bar;
+
+	for (bar = BAR_0; bar <= BAR_5; bar++)
+		dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+				     enum pci_epc_irq_type type,
+				     u8 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
+		return -EINVAL;
+	case PCI_EPC_IRQ_MSI:
+		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+	}
+
+	return 0;
+}
+
+static struct dw_pcie_ep_ops pcie_ep_ops = {
+	.ep_init = dw_plat_pcie_ep_init,
+	.raise_irq = dw_plat_pcie_ep_raise_irq,
+};
+
+static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
 				 struct platform_device *pdev)
 {
+	struct dw_pcie *pci = dw_plat_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
 	struct device *dev = &pdev->dev;
 	int ret;
 
@@ -63,15 +123,44 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
-		dev_err(dev, "failed to initialize host\n");
+		dev_err(dev, "Failed to initialize host\n");
 		return ret;
 	}
 
 	return 0;
 }
 
-static const struct dw_pcie_ops dw_pcie_ops = {
-};
+static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
+			       struct platform_device *pdev)
+{
+	int ret;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci = dw_plat_pcie->pci;
+
+	ep = &pci->ep;
+	ep->ops = &pcie_ep_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+	pci->dbi_base2 = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base2))
+		return PTR_ERR(pci->dbi_base2);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "Failed to initialize endpoint\n");
+		return ret;
+	}
+	return 0;
+}
 
 static int dw_plat_pcie_probe(struct platform_device *pdev)
 {
@@ -80,6 +169,16 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
 	struct dw_pcie *pci;
 	struct resource *res;  /* Resource from DT */
 	int ret;
+	const struct of_device_id *match;
+	const struct dw_plat_pcie_of_data *data;
+	enum dw_pcie_device_mode mode;
+
+	match = of_match_device(dw_plat_pcie_of_match, dev);
+	if (!match)
+		return -EINVAL;
+
+	data = (struct dw_plat_pcie_of_data *)match->data;
+	mode = (enum dw_pcie_device_mode)data->mode;
 
 	dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
 	if (!dw_plat_pcie)
@@ -93,23 +192,59 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
 	pci->ops = &dw_pcie_ops;
 
 	dw_plat_pcie->pci = pci;
+	dw_plat_pcie->mode = mode;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	if (!res)
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	pci->dbi_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(pci->dbi_base))
 		return PTR_ERR(pci->dbi_base);
 
 	platform_set_drvdata(pdev, dw_plat_pcie);
 
-	ret = dw_plat_add_pcie_port(&pci->pp, pdev);
-	if (ret < 0)
-		return ret;
+	switch (dw_plat_pcie->mode) {
+	case DW_PCIE_RC_TYPE:
+		if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
+			return -ENODEV;
+
+		ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
+		if (ret < 0)
+			return ret;
+		break;
+	case DW_PCIE_EP_TYPE:
+		if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
+			return -ENODEV;
+
+		ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
+		if (ret < 0)
+			return ret;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+	}
 
 	return 0;
 }
 
+static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
+	.mode = DW_PCIE_EP_TYPE,
+};
+
 static const struct of_device_id dw_plat_pcie_of_match[] = {
-	{ .compatible = "snps,dw-pcie", },
+	{
+		.compatible = "snps,dw-pcie",
+		.data = &dw_plat_pcie_rc_of_data,
+	},
+	{
+		.compatible = "snps,dw-pcie-ep",
+		.data = &dw_plat_pcie_ep_of_data,
+	},
 	{},
 };
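
The reworked probe path above matches the compatible string, takes the dw_plat_pcie_of_data hanging off the of_device_id, and branches between root-complex and endpoint setup on its mode field. A stripped-down user-space sketch of that match -> data -> mode dispatch, with plain structures standing in for the kernel ones:

#include <stdio.h>
#include <string.h>

enum dw_pcie_device_mode { DW_PCIE_RC_TYPE, DW_PCIE_EP_TYPE };

struct of_data     { enum dw_pcie_device_mode mode; };
struct match_entry { const char *compatible; const struct of_data *data; };

static const struct of_data rc_data = { .mode = DW_PCIE_RC_TYPE };
static const struct of_data ep_data = { .mode = DW_PCIE_EP_TYPE };

static const struct match_entry match_table[] = {
	{ "snps,dw-pcie",    &rc_data },
	{ "snps,dw-pcie-ep", &ep_data },
	{ NULL, NULL },		/* sentinel, like the {} terminator above */
};

static int probe(const char *compatible)
{
	const struct match_entry *m;

	for (m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			break;
	if (!m->compatible)
		return -1;	/* no match: -EINVAL in the driver */

	switch (m->data->mode) {
	case DW_PCIE_RC_TYPE:
		printf("configure as root complex\n");
		break;
	case DW_PCIE_EP_TYPE:
		printf("configure as endpoint\n");
		break;
	}
	return 0;
}

int main(void)
{
	return probe("snps,dw-pcie-ep");
}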
 
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
index 1b7282e5b494..778c4f76a884 100644
--- a/drivers/pci/dwc/pcie-designware.c
+++ b/drivers/pci/dwc/pcie-designware.c
@@ -69,7 +69,7 @@ u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
 
 	ret = dw_pcie_read(base + reg, size, &val);
 	if (ret)
-		dev_err(pci->dev, "read DBI address failed\n");
+		dev_err(pci->dev, "Read DBI address failed\n");
 
 	return val;
 }
@@ -86,7 +86,7 @@ void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
 
 	ret = dw_pcie_write(base + reg, size, val);
 	if (ret)
-		dev_err(pci->dev, "write DBI address failed\n");
+		dev_err(pci->dev, "Write DBI address failed\n");
 }
 
 static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
@@ -137,7 +137,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
 
 		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
 	}
-	dev_err(pci->dev, "outbound iATU is not being enabled\n");
+	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 }
 
 void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
@@ -180,7 +180,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
 
 		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
 	}
-	dev_err(pci->dev, "outbound iATU is not being enabled\n");
+	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 }
 
 static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
@@ -238,7 +238,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
 
 		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
 	}
-	dev_err(pci->dev, "inbound iATU is not being enabled\n");
+	dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 
 	return -EBUSY;
 }
@@ -284,7 +284,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
 
 		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
 	}
-	dev_err(pci->dev, "inbound iATU is not being enabled\n");
+	dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 
 	return -EBUSY;
 }
@@ -313,16 +313,16 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
 {
 	int retries;
 
-	/* check if the link is up or not */
+	/* Check if the link is up or not */
 	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
 		if (dw_pcie_link_up(pci)) {
-			dev_info(pci->dev, "link up\n");
+			dev_info(pci->dev, "Link up\n");
 			return 0;
 		}
 		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
 	}
 
-	dev_err(pci->dev, "phy link never came up\n");
+	dev_err(pci->dev, "Phy link never came up\n");
 
 	return -ETIMEDOUT;
 }
@@ -351,7 +351,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
 	if (ret)
 		lanes = 0;
 
-	/* set the number of lanes */
+	/* Set the number of lanes */
 	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
 	val &= ~PORT_LINK_MODE_MASK;
 	switch (lanes) {
@@ -373,7 +373,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
 	}
 	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
 
-	/* set link width speed control register */
+	/* Set link width speed control register */
 	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
 	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
 	switch (lanes) {
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index fe811dbc12cf..bee4e2535a61 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -110,6 +110,7 @@
 #define MAX_MSI_IRQS			256
 #define MAX_MSI_IRQS_PER_CTRL		32
 #define MAX_MSI_CTRLS			(MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
+#define MSI_REG_CTRL_BLOCK_SIZE		12
 #define MSI_DEF_NUM_VECTORS		32
 
 /* Maximum number of inbound/outbound iATUs */
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 5897af7d3355..a1d0198081a6 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -10,7 +10,7 @@
 
 #include <linux/clk.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
@@ -19,6 +19,7 @@
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
 #include <linux/regulator/consumer.h>
@@ -869,7 +870,7 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
 
 	/* enable PCIe clocks and resets */
 	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
-	val &= !BIT(0);
+	val &= ~BIT(0);
 	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
 
 	/* change DBI base address */
@@ -1088,6 +1089,7 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
 	int ret;
 
+	pm_runtime_get_sync(pci->dev);
 	qcom_ep_reset_assert(pcie);
 
 	ret = pcie->ops->init(pcie);
@@ -1124,6 +1126,7 @@ err_disable_phy:
 	phy_power_off(pcie->phy);
 err_deinit:
 	pcie->ops->deinit(pcie);
+	pm_runtime_put(pci->dev);
 
 	return ret;
 }
@@ -1212,6 +1215,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 	if (!pci)
 		return -ENOMEM;
 
+	pm_runtime_enable(dev);
 	pci->dev = dev;
 	pci->ops = &dw_pcie_ops;
 	pp = &pci->pp;
@@ -1257,14 +1261,17 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 	}
 
 	ret = phy_init(pcie->phy);
-	if (ret)
+	if (ret) {
+		pm_runtime_disable(&pdev->dev);
 		return ret;
+	}
 
 	platform_set_drvdata(pdev, pcie);
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
 		dev_err(dev, "cannot initialize host\n");
+		pm_runtime_disable(&pdev->dev);
 		return ret;
 	}
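
One hunk in the qcom changes above is a real bug fix rather than cleanup: "val &= !BIT(0)" applies logical NOT, which collapses the mask to 0 and wipes the whole register value, while the intended "val &= ~BIT(0)" clears only bit 0. A quick demonstration of the difference:

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned int val = 0xdeadbeef;

	printf("val & !BIT(0) = %#x\n", val & !BIT(0));	/* 0: everything cleared */
	printf("val & ~BIT(0) = %#x\n", val & ~BIT(0));	/* 0xdeadbeee: only bit 0 cleared */
	return 0;
}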
 
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 7cef85124325..63ed706445b9 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -87,7 +87,7 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
 
 	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
 	if (!src_addr) {
-		dev_err(dev, "failed to allocate source address\n");
+		dev_err(dev, "Failed to allocate source address\n");
 		reg->status = STATUS_SRC_ADDR_INVALID;
 		ret = -ENOMEM;
 		goto err;
@@ -96,14 +96,14 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
 	ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
 			       reg->size);
 	if (ret) {
-		dev_err(dev, "failed to map source address\n");
+		dev_err(dev, "Failed to map source address\n");
 		reg->status = STATUS_SRC_ADDR_INVALID;
 		goto err_src_addr;
 	}
 
 	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
 	if (!dst_addr) {
-		dev_err(dev, "failed to allocate destination address\n");
+		dev_err(dev, "Failed to allocate destination address\n");
 		reg->status = STATUS_DST_ADDR_INVALID;
 		ret = -ENOMEM;
 		goto err_src_map_addr;
@@ -112,7 +112,7 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
 	ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
 			       reg->size);
 	if (ret) {
-		dev_err(dev, "failed to map destination address\n");
+		dev_err(dev, "Failed to map destination address\n");
 		reg->status = STATUS_DST_ADDR_INVALID;
 		goto err_dst_addr;
 	}
@@ -149,7 +149,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
 
 	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
 	if (!src_addr) {
-		dev_err(dev, "failed to allocate address\n");
+		dev_err(dev, "Failed to allocate address\n");
 		reg->status = STATUS_SRC_ADDR_INVALID;
 		ret = -ENOMEM;
 		goto err;
@@ -158,7 +158,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
 	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
 			       reg->size);
 	if (ret) {
-		dev_err(dev, "failed to map address\n");
+		dev_err(dev, "Failed to map address\n");
 		reg->status = STATUS_SRC_ADDR_INVALID;
 		goto err_addr;
 	}
@@ -201,7 +201,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
 
 	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
 	if (!dst_addr) {
-		dev_err(dev, "failed to allocate address\n");
+		dev_err(dev, "Failed to allocate address\n");
 		reg->status = STATUS_DST_ADDR_INVALID;
 		ret = -ENOMEM;
 		goto err;
@@ -210,7 +210,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
 	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
 			       reg->size);
 	if (ret) {
-		dev_err(dev, "failed to map address\n");
+		dev_err(dev, "Failed to map address\n");
 		reg->status = STATUS_DST_ADDR_INVALID;
 		goto err_addr;
 	}
@@ -230,7 +230,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
 	 * wait 1ms in order for the write to complete. Without this delay, an L3
 	 * error is observed in the host system.
 	 */
-	mdelay(1);
+	usleep_range(1000, 2000);
 
 	kfree(buf);
 
@@ -379,7 +379,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
 		ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
 		if (ret) {
 			pci_epf_free_space(epf, epf_test->reg[bar], bar);
-			dev_err(dev, "failed to set BAR%d\n", bar);
+			dev_err(dev, "Failed to set BAR%d\n", bar);
 			if (bar == test_reg_bar)
 				return ret;
 		}
@@ -406,7 +406,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
 	base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
 				   test_reg_bar);
 	if (!base) {
-		dev_err(dev, "failed to allocated register space\n");
+		dev_err(dev, "Failed to allocated register space\n");
 		return -ENOMEM;
 	}
 	epf_test->reg[test_reg_bar] = base;
@@ -416,7 +416,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
 			continue;
 		base = pci_epf_alloc_space(epf, bar_size[bar], bar);
 		if (!base)
-			dev_err(dev, "failed to allocate space for BAR%d\n",
+			dev_err(dev, "Failed to allocate space for BAR%d\n",
 				bar);
 		epf_test->reg[bar] = base;
 	}
@@ -435,9 +435,16 @@ static int pci_epf_test_bind(struct pci_epf *epf)
 	if (WARN_ON_ONCE(!epc))
 		return -EINVAL;
 
+	if (epc->features & EPC_FEATURE_NO_LINKUP_NOTIFIER)
+		epf_test->linkup_notifier = false;
+	else
+		epf_test->linkup_notifier = true;
+
+	epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
+
 	ret = pci_epc_write_header(epc, epf->func_no, header);
 	if (ret) {
-		dev_err(dev, "configuration header write failed\n");
+		dev_err(dev, "Configuration header write failed\n");
 		return ret;
 	}
 
@@ -519,7 +526,7 @@ static int __init pci_epf_test_init(void)
 					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 	ret = pci_epf_register_driver(&test_driver);
 	if (ret) {
-		pr_err("failed to register pci epf test driver --> %d\n", ret);
+		pr_err("Failed to register pci epf test driver --> %d\n", ret);
 		return ret;
 	}
 
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 465b5f058b6d..523a8cab3bfb 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -15,6 +15,8 @@
 #include <linux/pci-epf.h>
 #include <linux/pci-ep-cfs.h>
 
+static DEFINE_MUTEX(pci_epf_mutex);
+
 static struct bus_type pci_epf_bus_type;
 static const struct device_type pci_epf_type;
 
@@ -143,7 +145,13 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
  */
 void pci_epf_unregister_driver(struct pci_epf_driver *driver)
 {
-	pci_ep_cfs_remove_epf_group(driver->group);
+	struct config_group *group;
+
+	mutex_lock(&pci_epf_mutex);
+	list_for_each_entry(group, &driver->epf_group, group_entry)
+		pci_ep_cfs_remove_epf_group(group);
+	list_del(&driver->epf_group);
+	mutex_unlock(&pci_epf_mutex);
 	driver_unregister(&driver->driver);
 }
 EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
@@ -159,6 +167,8 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
 			      struct module *owner)
 {
 	int ret;
+	struct config_group *group;
+	const struct pci_epf_device_id *id;
 
 	if (!driver->ops)
 		return -EINVAL;
@@ -173,7 +183,16 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
 	if (ret)
 		return ret;
 
-	driver->group = pci_ep_cfs_add_epf_group(driver->driver.name);
+	INIT_LIST_HEAD(&driver->epf_group);
+
+	id = driver->id_table;
+	while (id->name[0]) {
+		group = pci_ep_cfs_add_epf_group(id->name);
+		mutex_lock(&pci_epf_mutex);
+		list_add_tail(&group->group_entry, &driver->epf_group);
+		mutex_unlock(&pci_epf_mutex);
+		id++;
+	}
 
 	return 0;
 }
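
The pci-epf-core change above creates one configfs group per entry in the driver's id_table instead of a single group per driver, and keeps the groups on a mutex-protected list so that unregister can remove them all. A rough analogue of that bookkeeping, with a plain linked list and a pthread mutex standing in for the kernel list/mutex APIs and "pci_epf_test" used only as an example name:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct epf_id { char name[32]; };	/* empty name terminates the table */
struct group  { const char *name; struct group *next; };

static pthread_mutex_t epf_mutex = PTHREAD_MUTEX_INITIALIZER;

static void register_driver(const struct epf_id *id_table, struct group **head)
{
	const struct epf_id *id;

	for (id = id_table; id->name[0]; id++) {
		struct group *g = malloc(sizeof(*g));

		g->name = id->name;
		pthread_mutex_lock(&epf_mutex);
		g->next = *head;		/* add this id's group to the list */
		*head = g;
		pthread_mutex_unlock(&epf_mutex);
	}
}

static void unregister_driver(struct group **head)
{
	pthread_mutex_lock(&epf_mutex);
	while (*head) {				/* tear down every group we added */
		struct group *g = *head;

		*head = g->next;
		printf("removing group %s\n", g->name);
		free(g);
	}
	pthread_mutex_unlock(&epf_mutex);
}

int main(void)
{
	static const struct epf_id ids[] = { { "pci_epf_test" }, { "" } };
	struct group *groups = NULL;

	register_driver(ids, &groups);
	unregister_driver(&groups);
	return 0;
}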
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 0d0177ce436c..a96e23bda664 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -5,13 +5,14 @@ menu "PCI host controller drivers"
 
 config PCI_MVEBU
 	bool "Marvell EBU PCIe controller"
-	depends on ARCH_MVEBU || ARCH_DOVE
+	depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST
+	depends on MVEBU_MBUS
 	depends on ARM
 	depends on OF
 
 config PCI_AARDVARK
 	bool "Aardvark PCIe controller"
-	depends on ARCH_MVEBU && ARM64
+	depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
 	depends on OF
 	depends on PCI_MSI_IRQ_DOMAIN
 	help
@@ -21,7 +22,7 @@ config PCI_AARDVARK
 
 config PCIE_XILINX_NWL
 	bool "NWL PCIe Core"
-	depends on ARCH_ZYNQMP
+	depends on ARCH_ZYNQMP || COMPILE_TEST
 	depends on PCI_MSI_IRQ_DOMAIN
 	help
 	 Say 'Y' here if you want kernel support for Xilinx
@@ -32,12 +33,11 @@ config PCIE_XILINX_NWL
 config PCI_FTPCI100
 	bool "Faraday Technology FTPCI100 PCI controller"
 	depends on OF
-	depends on ARM
 	default ARCH_GEMINI
 
 config PCI_TEGRA
 	bool "NVIDIA Tegra PCIe controller"
-	depends on ARCH_TEGRA
+	depends on ARCH_TEGRA || COMPILE_TEST
 	depends on PCI_MSI_IRQ_DOMAIN
 	help
 	  Say Y here if you want support for the PCIe host controller found
@@ -45,8 +45,8 @@ config PCI_TEGRA
 
 config PCI_RCAR_GEN2
 	bool "Renesas R-Car Gen2 Internal PCI controller"
-	depends on ARM
 	depends on ARCH_RENESAS || COMPILE_TEST
+	depends on ARM
 	help
 	  Say Y here if you want internal PCI support on R-Car Gen2 SoC.
 	  There are 3 internal PCI controllers available with a single
@@ -54,7 +54,7 @@ config PCI_RCAR_GEN2
 
 config PCIE_RCAR
 	bool "Renesas R-Car PCIe controller"
-	depends on ARCH_RENESAS || (ARM && COMPILE_TEST)
+	depends on ARCH_RENESAS || COMPILE_TEST
 	depends on PCI_MSI_IRQ_DOMAIN
 	help
 	  Say Y here if you want PCIe controller support on R-Car SoCs.
@@ -65,25 +65,25 @@ config PCI_HOST_COMMON
 
 config PCI_HOST_GENERIC
 	bool "Generic PCI host controller"
-	depends on (ARM || ARM64) && OF
+	depends on OF
 	select PCI_HOST_COMMON
 	select IRQ_DOMAIN
+	select PCI_DOMAINS
 	help
 	  Say Y here if you want to support a simple generic PCI host
 	  controller, such as the one emulated by kvmtool.
 
 config PCIE_XILINX
 	bool "Xilinx AXI PCIe host bridge support"
-	depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC)
+	depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) || COMPILE_TEST
 	help
 	  Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
 	  Host Bridge driver.
 
 config PCI_XGENE
 	bool "X-Gene PCIe controller"
-	depends on ARM64
+	depends on ARM64 || COMPILE_TEST
 	depends on OF || (ACPI && PCI_QUIRKS)
-	select PCIEPORTBUS
 	help
 	  Say Y here if you want internal PCI support on APM X-Gene SoC.
 	  There are 5 internal PCIe ports available. Each port is GEN3 capable
@@ -101,7 +101,7 @@ config PCI_XGENE_MSI
 config PCI_V3_SEMI
 	bool "V3 Semiconductor PCI controller"
 	depends on OF
-	depends on ARM
+	depends on ARM || COMPILE_TEST
 	default ARCH_INTEGRATOR_AP
 
 config PCI_VERSATILE
@@ -147,8 +147,7 @@ config PCIE_IPROC_MSI
 
 config PCIE_ALTERA
 	bool "Altera PCIe controller"
-	depends on ARM || NIOS2
-	depends on OF_PCI
+	depends on ARM || NIOS2 || COMPILE_TEST
 	select PCI_DOMAINS
 	help
 	  Say Y here if you want to enable PCIe controller support on Altera
@@ -164,7 +163,7 @@ config PCIE_ALTERA_MSI
 
 config PCI_HOST_THUNDER_PEM
 	bool "Cavium Thunder PCIe controller to off-chip devices"
-	depends on ARM64
+	depends on ARM64 || COMPILE_TEST
 	depends on OF || (ACPI && PCI_QUIRKS)
 	select PCI_HOST_COMMON
 	help
@@ -172,29 +171,45 @@ config PCI_HOST_THUNDER_PEM
 
 config PCI_HOST_THUNDER_ECAM
 	bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
-	depends on ARM64
+	depends on ARM64 || COMPILE_TEST
 	depends on OF || (ACPI && PCI_QUIRKS)
 	select PCI_HOST_COMMON
 	help
 	  Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
 
 config PCIE_ROCKCHIP
-	tristate "Rockchip PCIe controller"
+	bool
+	depends on PCI
+
+config PCIE_ROCKCHIP_HOST
+	tristate "Rockchip PCIe host controller"
 	depends on ARCH_ROCKCHIP || COMPILE_TEST
 	depends on OF
 	depends on PCI_MSI_IRQ_DOMAIN
 	select MFD_SYSCON
+	select PCIE_ROCKCHIP
 	help
 	  Say Y here if you want internal PCI support on Rockchip SoC.
 	  There is 1 internal PCIe port available to support GEN2 with
 	  4 slots.
 
+config PCIE_ROCKCHIP_EP
+	bool "Rockchip PCIe endpoint controller"
+	depends on ARCH_ROCKCHIP || COMPILE_TEST
+	depends on OF
+	depends on PCI_ENDPOINT
+	select MFD_SYSCON
+	select PCIE_ROCKCHIP
+	help
+	  Say Y here if you want to support Rockchip PCIe controller in
+	  endpoint mode on Rockchip SoC. There is 1 internal PCIe port
+	  available to support GEN2 with 4 slots.
+
 config PCIE_MEDIATEK
 	bool "MediaTek PCIe controller"
-	depends on (ARM || ARM64) && (ARCH_MEDIATEK || COMPILE_TEST)
+	depends on ARCH_MEDIATEK || COMPILE_TEST
 	depends on OF
-	depends on PCI
-	select PCIEPORTBUS
+	depends on PCI_MSI_IRQ_DOMAIN
 	help
 	  Say Y here if you want to enable PCIe controller support on
 	  MediaTek SoCs.
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 3b1059190867..11d21b026d37 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -20,6 +20,8 @@ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
 obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
 obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
 obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
+obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
 obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
 obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
 obj-$(CONFIG_VMD) += vmd.o
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 9abf549631b4..d3172d5d3d35 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -19,6 +19,8 @@
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 
+#include "../pci.h"
+
 /* PCIe core registers */
 #define PCIE_CORE_CMD_STATUS_REG				0x4
 #define     PCIE_CORE_CMD_IO_ACCESS_EN				BIT(0)
@@ -822,14 +824,13 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
 {
 	int err, res_valid = 0;
 	struct device *dev = &pcie->pdev->dev;
-	struct device_node *np = dev->of_node;
 	struct resource_entry *win, *tmp;
 	resource_size_t iobase;
 
 	INIT_LIST_HEAD(&pcie->resources);
 
-	err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
-					       &iobase);
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+						    &pcie->resources, &iobase);
 	if (err)
 		return err;
 
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
index 5008fd87956a..a1ebe9ed441f 100644
--- a/drivers/pci/host/pci-ftpci100.c
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -28,6 +28,8 @@
 #include <linux/irq.h>
 #include <linux/clk.h>
 
+#include "../pci.h"
+
 /*
  * Special configuration registers directly in the first few words
  * in I/O space.
@@ -476,8 +478,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
 	if (IS_ERR(p->base))
 		return PTR_ERR(p->base);
 
-	ret = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
-					       &res, &io_base);
+	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+						    &res, &io_base);
 	if (ret)
 		return ret;
 
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index 5d028f53fdcd..d8f10451f273 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -101,5 +101,18 @@ int pci_host_common_probe(struct platform_device *pdev,
 		return ret;
 	}
 
+	platform_set_drvdata(pdev, bridge->bus);
+	return 0;
+}
+
+int pci_host_common_remove(struct platform_device *pdev)
+{
+	struct pci_bus *bus = platform_get_drvdata(pdev);
+
+	pci_lock_rescan_remove();
+	pci_stop_root_bus(bus);
+	pci_remove_root_bus(bus);
+	pci_unlock_rescan_remove();
+
 	return 0;
 }
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 45319ee3b484..dea3ec7592a2 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -95,5 +95,6 @@ static struct platform_driver gen_pci_driver = {
 		.suppress_bind_attrs = true,
 	},
 	.probe = gen_pci_probe,
+	.remove = pci_host_common_remove,
 };
 builtin_platform_driver(gen_pci_driver);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 50cdefe3f6d3..6cc5036ac83c 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -433,7 +433,7 @@ enum hv_pcibus_state {
 struct hv_pcibus_device {
 	struct pci_sysdata sysdata;
 	enum hv_pcibus_state state;
-	atomic_t remove_lock;
+	refcount_t remove_lock;
 	struct hv_device *hdev;
 	resource_size_t low_mmio_space;
 	resource_size_t high_mmio_space;
@@ -488,17 +488,6 @@ enum hv_pcichild_state {
 	hv_pcichild_maximum
 };
 
-enum hv_pcidev_ref_reason {
-	hv_pcidev_ref_invalid = 0,
-	hv_pcidev_ref_initial,
-	hv_pcidev_ref_by_slot,
-	hv_pcidev_ref_packet,
-	hv_pcidev_ref_pnp,
-	hv_pcidev_ref_childlist,
-	hv_pcidev_irqdata,
-	hv_pcidev_ref_max
-};
-
 struct hv_pci_dev {
 	/* List protected by pci_rescan_remove_lock */
 	struct list_head list_entry;
@@ -548,14 +537,41 @@ static void hv_pci_generic_compl(void *context, struct pci_response *resp,
 
 static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
 						u32 wslot);
-static void get_pcichild(struct hv_pci_dev *hv_pcidev,
-			 enum hv_pcidev_ref_reason reason);
-static void put_pcichild(struct hv_pci_dev *hv_pcidev,
-			 enum hv_pcidev_ref_reason reason);
+
+static void get_pcichild(struct hv_pci_dev *hpdev)
+{
+	refcount_inc(&hpdev->refs);
+}
+
+static void put_pcichild(struct hv_pci_dev *hpdev)
+{
+	if (refcount_dec_and_test(&hpdev->refs))
+		kfree(hpdev);
+}
 
 static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
 static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
 
+/*
+ * There is no good way to get notified from vmbus_onoffer_rescind(),
+ * so let's use polling here, since this is not a hot path.
+ */
+static int wait_for_response(struct hv_device *hdev,
+			     struct completion *comp)
+{
+	while (true) {
+		if (hdev->channel->rescind) {
+			dev_warn_once(&hdev->device, "The device is gone.\n");
+			return -ENODEV;
+		}
+
+		if (wait_for_completion_timeout(comp, HZ / 10))
+			break;
+	}
+
+	return 0;
+}
+
 /**
  * devfn_to_wslot() - Convert from Linux PCI slot to Windows
  * @devfn:	The Linux representation of PCI slot
@@ -762,7 +778,7 @@ static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
 
 	_hv_pcifront_read_config(hpdev, where, size, val);
 
-	put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+	put_pcichild(hpdev);
 	return PCIBIOS_SUCCESSFUL;
 }
 
@@ -790,7 +806,7 @@ static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
 
 	_hv_pcifront_write_config(hpdev, where, size, val);
 
-	put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+	put_pcichild(hpdev);
 	return PCIBIOS_SUCCESSFUL;
 }
 
@@ -856,7 +872,7 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
 	}
 
 	hv_int_desc_free(hpdev, int_desc);
-	put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+	put_pcichild(hpdev);
 }
 
 static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
@@ -1186,13 +1202,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	msg->address_lo = comp.int_desc.address & 0xffffffff;
 	msg->data = comp.int_desc.data;
 
-	put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+	put_pcichild(hpdev);
 	return;
 
 free_int_desc:
 	kfree(int_desc);
 drop_reference:
-	put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+	put_pcichild(hpdev);
 return_null_message:
 	msg->address_hi = 0;
 	msg->address_lo = 0;
@@ -1283,7 +1299,6 @@ static u64 get_bar_size(u64 bar_val)
  */
 static void survey_child_resources(struct hv_pcibus_device *hbus)
 {
-	struct list_head *iter;
 	struct hv_pci_dev *hpdev;
 	resource_size_t bar_size = 0;
 	unsigned long flags;
@@ -1309,8 +1324,7 @@ static void survey_child_resources(struct hv_pcibus_device *hbus)
 	 * for a child device are a power of 2 in size and aligned in memory,
 	 * so it's sufficient to just add them up without tracking alignment.
 	 */
-	list_for_each(iter, &hbus->children) {
-		hpdev = container_of(iter, struct hv_pci_dev, list_entry);
+	list_for_each_entry(hpdev, &hbus->children, list_entry) {
 		for (i = 0; i < 6; i++) {
 			if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
 				dev_err(&hbus->hdev->device,
@@ -1363,7 +1377,6 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
 	resource_size_t low_base = 0;
 	resource_size_t bar_size;
 	struct hv_pci_dev *hpdev;
-	struct list_head *iter;
 	unsigned long flags;
 	u64 bar_val;
 	u32 command;
@@ -1385,9 +1398,7 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
 
 	/* Pick addresses for the BARs. */
 	do {
-		list_for_each(iter, &hbus->children) {
-			hpdev = container_of(iter, struct hv_pci_dev,
-					     list_entry);
+		list_for_each_entry(hpdev, &hbus->children, list_entry) {
 			for (i = 0; i < 6; i++) {
 				bar_val = hpdev->probed_bar[i];
 				if (bar_val == 0)
@@ -1508,19 +1519,6 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
 	complete(&completion->host_event);
 }
 
-static void get_pcichild(struct hv_pci_dev *hpdev,
-			    enum hv_pcidev_ref_reason reason)
-{
-	refcount_inc(&hpdev->refs);
-}
-
-static void put_pcichild(struct hv_pci_dev *hpdev,
-			    enum hv_pcidev_ref_reason reason)
-{
-	if (refcount_dec_and_test(&hpdev->refs))
-		kfree(hpdev);
-}
-
 /**
  * new_pcichild_device() - Create a new child device
  * @hbus:	The internal struct tracking this root PCI bus.
@@ -1568,24 +1566,14 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
 	if (ret)
 		goto error;
 
-	wait_for_completion(&comp_pkt.host_event);
+	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
+		goto error;
 
 	hpdev->desc = *desc;
 	refcount_set(&hpdev->refs, 1);
-	get_pcichild(hpdev, hv_pcidev_ref_childlist);
+	get_pcichild(hpdev);
 	spin_lock_irqsave(&hbus->device_list_lock, flags);
 
-	/*
-	 * When a device is being added to the bus, we set the PCI domain
-	 * number to be the device serial number, which is non-zero and
-	 * unique on the same VM.  The serial numbers start with 1, and
-	 * increase by 1 for each device.  So device names including this
-	 * can have shorter names than based on the bus instance UUID.
-	 * Only the first device serial number is used for domain, so the
-	 * domain number will not change after the first device is added.
-	 */
-	if (list_empty(&hbus->children))
-		hbus->sysdata.domain = desc->ser;
 	list_add_tail(&hpdev->list_entry, &hbus->children);
 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
 	return hpdev;
@@ -1618,7 +1606,7 @@ static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
 	list_for_each_entry(iter, &hbus->children, list_entry) {
 		if (iter->desc.win_slot.slot == wslot) {
 			hpdev = iter;
-			get_pcichild(hpdev, hv_pcidev_ref_by_slot);
+			get_pcichild(hpdev);
 			break;
 		}
 	}
@@ -1654,7 +1642,6 @@ static void pci_devices_present_work(struct work_struct *work)
 {
 	u32 child_no;
 	bool found;
-	struct list_head *iter;
 	struct pci_function_description *new_desc;
 	struct hv_pci_dev *hpdev;
 	struct hv_pcibus_device *hbus;
@@ -1691,10 +1678,8 @@ static void pci_devices_present_work(struct work_struct *work)
 
 	/* First, mark all existing children as reported missing. */
 	spin_lock_irqsave(&hbus->device_list_lock, flags);
-	list_for_each(iter, &hbus->children) {
-			hpdev = container_of(iter, struct hv_pci_dev,
-					     list_entry);
-			hpdev->reported_missing = true;
+	list_for_each_entry(hpdev, &hbus->children, list_entry) {
+		hpdev->reported_missing = true;
 	}
 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
 
@@ -1704,11 +1689,8 @@ static void pci_devices_present_work(struct work_struct *work)
 		new_desc = &dr->func[child_no];
 
 		spin_lock_irqsave(&hbus->device_list_lock, flags);
-		list_for_each(iter, &hbus->children) {
-			hpdev = container_of(iter, struct hv_pci_dev,
-					     list_entry);
-			if ((hpdev->desc.win_slot.slot ==
-			     new_desc->win_slot.slot) &&
+		list_for_each_entry(hpdev, &hbus->children, list_entry) {
+			if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
 			    (hpdev->desc.v_id == new_desc->v_id) &&
 			    (hpdev->desc.d_id == new_desc->d_id) &&
 			    (hpdev->desc.ser == new_desc->ser)) {
@@ -1730,12 +1712,10 @@ static void pci_devices_present_work(struct work_struct *work)
 	spin_lock_irqsave(&hbus->device_list_lock, flags);
 	do {
 		found = false;
-		list_for_each(iter, &hbus->children) {
-			hpdev = container_of(iter, struct hv_pci_dev,
-					     list_entry);
+		list_for_each_entry(hpdev, &hbus->children, list_entry) {
 			if (hpdev->reported_missing) {
 				found = true;
-				put_pcichild(hpdev, hv_pcidev_ref_childlist);
+				put_pcichild(hpdev);
 				list_move_tail(&hpdev->list_entry, &removed);
 				break;
 			}
@@ -1748,7 +1728,7 @@ static void pci_devices_present_work(struct work_struct *work)
 		hpdev = list_first_entry(&removed, struct hv_pci_dev,
 					 list_entry);
 		list_del(&hpdev->list_entry);
-		put_pcichild(hpdev, hv_pcidev_ref_initial);
+		put_pcichild(hpdev);
 	}
 
 	switch (hbus->state) {
@@ -1883,8 +1863,8 @@ static void hv_eject_device_work(struct work_struct *work)
 			 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
 			 VM_PKT_DATA_INBAND, 0);
 
-	put_pcichild(hpdev, hv_pcidev_ref_childlist);
-	put_pcichild(hpdev, hv_pcidev_ref_pnp);
+	put_pcichild(hpdev);
+	put_pcichild(hpdev);
 	put_hvpcibus(hpdev->hbus);
 }
 
@@ -1899,7 +1879,7 @@ static void hv_eject_device_work(struct work_struct *work)
 static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
 {
 	hpdev->state = hv_pcichild_ejecting;
-	get_pcichild(hpdev, hv_pcidev_ref_pnp);
+	get_pcichild(hpdev);
 	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
 	get_hvpcibus(hpdev->hbus);
 	queue_work(hpdev->hbus->wq, &hpdev->wrk);
@@ -1999,8 +1979,7 @@ static void hv_pci_onchannelcallback(void *context)
 						      dev_message->wslot.slot);
 				if (hpdev) {
 					hv_pci_eject_device(hpdev);
-					put_pcichild(hpdev,
-							hv_pcidev_ref_by_slot);
+					put_pcichild(hpdev);
 				}
 				break;
 
@@ -2069,15 +2048,16 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
 				sizeof(struct pci_version_request),
 				(unsigned long)pkt, VM_PKT_DATA_INBAND,
 				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		if (!ret)
+			ret = wait_for_response(hdev, &comp_pkt.host_event);
+
 		if (ret) {
 			dev_err(&hdev->device,
-				"PCI Pass-through VSP failed sending version reqquest: %#x",
+				"PCI Pass-through VSP failed to request version: %d",
 				ret);
 			goto exit;
 		}
 
-		wait_for_completion(&comp_pkt.host_event);
-
 		if (comp_pkt.completion_status >= 0) {
 			pci_protocol_version = pci_protocol_versions[i];
 			dev_info(&hdev->device,
@@ -2286,11 +2266,12 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
 	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
 			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+	if (!ret)
+		ret = wait_for_response(hdev, &comp_pkt.host_event);
+
 	if (ret)
 		goto exit;
 
-	wait_for_completion(&comp_pkt.host_event);
-
 	if (comp_pkt.completion_status < 0) {
 		dev_err(&hdev->device,
 			"PCI Pass-through VSP failed D0 Entry with status %x\n",
@@ -2330,11 +2311,10 @@ static int hv_pci_query_relations(struct hv_device *hdev)
 
 	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
 			       0, VM_PKT_DATA_INBAND, 0);
-	if (ret)
-		return ret;
+	if (!ret)
+		ret = wait_for_response(hdev, &comp);
 
-	wait_for_completion(&comp);
-	return 0;
+	return ret;
 }
 
 /**
@@ -2398,17 +2378,17 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
 				PCI_RESOURCES_ASSIGNED2;
 			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
 		}
-		put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+		put_pcichild(hpdev);
 
 		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
 				size_res, (unsigned long)pkt,
 				VM_PKT_DATA_INBAND,
 				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		if (!ret)
+			ret = wait_for_response(hdev, &comp_pkt.host_event);
 		if (ret)
 			break;
 
-		wait_for_completion(&comp_pkt.host_event);
-
 		if (comp_pkt.completion_status < 0) {
 			ret = -EPROTO;
 			dev_err(&hdev->device,
@@ -2446,7 +2426,7 @@ static int hv_send_resources_released(struct hv_device *hdev)
 		pkt.message_type.type = PCI_RESOURCES_RELEASED;
 		pkt.wslot.slot = hpdev->desc.win_slot.slot;
 
-		put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+		put_pcichild(hpdev);
 
 		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
 				       VM_PKT_DATA_INBAND, 0);
@@ -2459,12 +2439,12 @@ static int hv_send_resources_released(struct hv_device *hdev)
 
 static void get_hvpcibus(struct hv_pcibus_device *hbus)
 {
-	atomic_inc(&hbus->remove_lock);
+	refcount_inc(&hbus->remove_lock);
 }
 
 static void put_hvpcibus(struct hv_pcibus_device *hbus)
 {
-	if (atomic_dec_and_test(&hbus->remove_lock))
+	if (refcount_dec_and_test(&hbus->remove_lock))
 		complete(&hbus->remove_event);
 }
 
@@ -2508,7 +2488,7 @@ static int hv_pci_probe(struct hv_device *hdev,
 			       hdev->dev_instance.b[8] << 8;
 
 	hbus->hdev = hdev;
-	atomic_inc(&hbus->remove_lock);
+	refcount_set(&hbus->remove_lock, 1);
 	INIT_LIST_HEAD(&hbus->children);
 	INIT_LIST_HEAD(&hbus->dr_list);
 	INIT_LIST_HEAD(&hbus->resources_for_children);
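
In the pci-hyperv rework above, the per-reason get/put helpers and their enum hv_pcidev_ref_reason tags are replaced by plain reference counting on each child device, and the bus-level remove_lock moves from atomic_t to refcount_t as well. A toy user-space version of the get/put lifetime pattern, approximating refcount_t with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct child_dev {
	atomic_int refs;
	/* ... device state ... */
};

static void get_child(struct child_dev *dev)
{
	atomic_fetch_add(&dev->refs, 1);
}

static void put_child(struct child_dev *dev)
{
	/* dropping the last reference frees the object */
	if (atomic_fetch_sub(&dev->refs, 1) == 1) {
		printf("freeing child device\n");
		free(dev);
	}
}

int main(void)
{
	struct child_dev *dev = calloc(1, sizeof(*dev));

	atomic_init(&dev->refs, 1);	/* initial reference, as in new_pcichild_device() */
	get_child(dev);			/* e.g. reference held by the children list */
	put_child(dev);
	put_child(dev);			/* last put: object is freed */
	return 0;
}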
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 5d4dccfc9d81..23e270839e6a 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -21,6 +21,8 @@
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
 
+#include "../pci.h"
+
 /*
  * PCIe unit register offsets.
  */
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index dd4f1a6b57c5..326171cb1a97 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -21,6 +21,8 @@
 #include <linux/sizes.h>
 #include <linux/slab.h>
 
+#include "../pci.h"
+
 /* AHB-PCI Bridge PCI communication registers */
 #define RCAR_AHBPCI_PCICOM_OFFSET	0x800
 
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 389e74be846c..f4f53d092e00 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -40,6 +40,8 @@
 #include <soc/tegra/cpuidle.h>
 #include <soc/tegra/pmc.h>
 
+#include "../pci.h"
+
 #define INT_PCI_MSI_NR (8 * 32)
 
 /* register definitions */
diff --git a/drivers/pci/host/pci-v3-semi.c b/drivers/pci/host/pci-v3-semi.c
index 0a4dea796663..68b8bfbdb867 100644
--- a/drivers/pci/host/pci-v3-semi.c
+++ b/drivers/pci/host/pci-v3-semi.c
@@ -33,6 +33,8 @@
 #include <linux/regmap.h>
 #include <linux/clk.h>
 
+#include "../pci.h"
+
 #define V3_PCI_VENDOR			0x00000000
 #define V3_PCI_DEVICE			0x00000002
 #define V3_PCI_CMD			0x00000004
@@ -791,7 +793,8 @@ static int v3_pci_probe(struct platform_device *pdev)
 	if (IS_ERR(v3->config_base))
 		return PTR_ERR(v3->config_base);
 
-	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &io_base);
+	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+						    &io_base);
 	if (ret)
 		return ret;
 
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 5b3876f5312b..994f32061b32 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -15,6 +15,8 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 
+#include "../pci.h"
+
 static void __iomem *versatile_pci_base;
 static void __iomem *versatile_cfg_base[2];
 
@@ -64,11 +66,10 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
 						     struct list_head *res)
 {
 	int err, mem = 1, res_valid = 0;
-	struct device_node *np = dev->of_node;
 	resource_size_t iobase;
 	struct resource_entry *win, *tmp;
 
-	err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase);
 	if (err)
 		return err;
 
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 0a0d7ee6d3c9..d854d67e873c 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -22,6 +22,8 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
+#include "../pci.h"
+
 #define PCIECORE_CTLANDSTATUS		0x50
 #define PIM1_1L				0x80
 #define IBAR2				0x98
@@ -632,7 +634,8 @@ static int xgene_pcie_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	ret = of_pci_get_host_bridge_resources(dn, 0, 0xff, &res, &iobase);
+	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+						    &iobase);
 	if (ret)
 		return ret;
 
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index a6af62e0256d..7d05e51205b3 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -17,6 +17,8 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
+#include "../pci.h"
+
 #define RP_TX_REG0			0x2000
 #define RP_TX_REG1			0x2004
 #define RP_TX_CNTRL			0x2008
@@ -488,11 +490,10 @@ static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
 {
 	int err, res_valid = 0;
 	struct device *dev = &pcie->pdev->dev;
-	struct device_node *np = dev->of_node;
 	struct resource_entry *win;
 
-	err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
-					       NULL);
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+						    &pcie->resources, NULL);
 	if (err)
 		return err;
 
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index e764a2a2693c..f30f5f3fb5c1 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -16,6 +16,7 @@
 #include <linux/of_platform.h>
 #include <linux/phy/phy.h>
 
+#include "../pci.h"
 #include "pcie-iproc.h"
 
 static const struct of_device_id iproc_pcie_of_match_table[] = {
@@ -99,8 +100,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
 		pcie->phy = NULL;
 	}
 
-	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources,
-					       &iobase);
+	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources,
+						    &iobase);
 	if (ret) {
 		dev_err(dev, "unable to get PCI host bridge resources\n");
 		return ret;
diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c
index a8b20c5012a9..0baabe30858f 100644
--- a/drivers/pci/host/pcie-mediatek.c
+++ b/drivers/pci/host/pcie-mediatek.c
@@ -11,8 +11,10 @@
 #include <linux/delay.h>
 #include <linux/iopoll.h>
 #include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
+#include <linux/msi.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
@@ -22,6 +24,8 @@
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
 
+#include "../pci.h"
+
 /* PCIe shared registers */
 #define PCIE_SYS_CFG		0x00
 #define PCIE_INT_ENABLE		0x0c
@@ -66,6 +70,10 @@
 
 /* PCIe V2 per-port registers */
 #define PCIE_MSI_VECTOR		0x0c0
+
+#define PCIE_CONF_VEND_ID	0x100
+#define PCIE_CONF_CLASS_ID	0x106
+
 #define PCIE_INT_MASK		0x420
 #define INTX_MASK		GENMASK(19, 16)
 #define INTX_SHIFT		16
@@ -125,13 +133,13 @@ struct mtk_pcie_port;
 
 /**
  * struct mtk_pcie_soc - differentiate between host generations
- * @has_msi: whether this host supports MSI interrupts or not
+ * @need_fix_class_id: whether this host's class ID needed to be fixed or not
  * @ops: pointer to configuration access functions
  * @startup: pointer to controller setting functions
  * @setup_irq: pointer to initialize IRQ functions
  */
 struct mtk_pcie_soc {
-	bool has_msi;
+	bool need_fix_class_id;
 	struct pci_ops *ops;
 	int (*startup)(struct mtk_pcie_port *port);
 	int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
@@ -155,7 +163,9 @@ struct mtk_pcie_soc {
  * @lane: lane count
  * @slot: port slot
  * @irq_domain: legacy INTx IRQ domain
+ * @inner_domain: inner IRQ domain
  * @msi_domain: MSI IRQ domain
+ * @lock: protect the msi_irq_in_use bitmap
  * @msi_irq_in_use: bit map for assigned MSI IRQ
  */
 struct mtk_pcie_port {
@@ -173,7 +183,9 @@ struct mtk_pcie_port {
 	u32 lane;
 	u32 slot;
 	struct irq_domain *irq_domain;
+	struct irq_domain *inner_domain;
 	struct irq_domain *msi_domain;
+	struct mutex lock;
 	DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
 };
 
@@ -375,6 +387,7 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
 {
 	struct mtk_pcie *pcie = port->pcie;
 	struct resource *mem = &pcie->mem;
+	const struct mtk_pcie_soc *soc = port->pcie->soc;
 	u32 val;
 	size_t size;
 	int err;
@@ -403,6 +416,15 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
 	       PCIE_MAC_SRSTB | PCIE_CRSTB;
 	writel(val, port->base + PCIE_RST_CTRL);
 
+	/* Set up vendor ID and class code */
+	if (soc->need_fix_class_id) {
+		val = PCI_VENDOR_ID_MEDIATEK;
+		writew(val, port->base + PCIE_CONF_VEND_ID);
+
+		val = PCI_CLASS_BRIDGE_HOST;
+		writew(val, port->base + PCIE_CONF_CLASS_ID);
+	}
+
 	/* 100ms timeout value should be enough for Gen1/2 training */
 	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
 				 !!(val & PCIE_PORT_LINKUP_V2), 20,
@@ -430,103 +452,130 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
 	return 0;
 }
 
-static int mtk_pcie_msi_alloc(struct mtk_pcie_port *port)
+static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 {
-	int msi;
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+	phys_addr_t addr;
 
-	msi = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
-	if (msi < MTK_MSI_IRQS_NUM)
-		set_bit(msi, port->msi_irq_in_use);
-	else
-		return -ENOSPC;
+	/* MT2712/MT7622 only support 32-bit MSI addresses */
+	addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
+	msg->address_hi = 0;
+	msg->address_lo = lower_32_bits(addr);
+
+	msg->data = data->hwirq;
 
-	return msi;
+	dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
+		(int)data->hwirq, msg->address_hi, msg->address_lo);
 }
 
-static void mtk_pcie_msi_free(struct mtk_pcie_port *port, unsigned long hwirq)
+static int mtk_msi_set_affinity(struct irq_data *irq_data,
+				const struct cpumask *mask, bool force)
 {
-	clear_bit(hwirq, port->msi_irq_in_use);
+	 return -EINVAL;
 }
 
-static int mtk_pcie_msi_setup_irq(struct msi_controller *chip,
-				  struct pci_dev *pdev, struct msi_desc *desc)
+static void mtk_msi_ack_irq(struct irq_data *data)
 {
-	struct mtk_pcie_port *port;
-	struct msi_msg msg;
-	unsigned int irq;
-	int hwirq;
-	phys_addr_t msg_addr;
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+	u32 hwirq = data->hwirq;
 
-	port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
-	if (!port)
-		return -EINVAL;
+	writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
+}
 
-	hwirq = mtk_pcie_msi_alloc(port);
-	if (hwirq < 0)
-		return hwirq;
+static struct irq_chip mtk_msi_bottom_irq_chip = {
+	.name			= "MTK MSI",
+	.irq_compose_msi_msg	= mtk_compose_msi_msg,
+	.irq_set_affinity	= mtk_msi_set_affinity,
+	.irq_ack		= mtk_msi_ack_irq,
+};
 
-	irq = irq_create_mapping(port->msi_domain, hwirq);
-	if (!irq) {
-		mtk_pcie_msi_free(port, hwirq);
-		return -EINVAL;
-	}
+static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				     unsigned int nr_irqs, void *args)
+{
+	struct mtk_pcie_port *port = domain->host_data;
+	unsigned long bit;
+
+	WARN_ON(nr_irqs != 1);
+	mutex_lock(&port->lock);
 
-	chip->dev = &pdev->dev;
+	bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
+	if (bit >= MTK_MSI_IRQS_NUM) {
+		mutex_unlock(&port->lock);
+		return -ENOSPC;
+	}
 
-	irq_set_msi_desc(irq, desc);
+	__set_bit(bit, port->msi_irq_in_use);
 
-	/* MT2712/MT7622 only support 32-bit MSI addresses */
-	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
-	msg.address_hi = 0;
-	msg.address_lo = lower_32_bits(msg_addr);
-	msg.data = hwirq;
+	mutex_unlock(&port->lock);
 
-	pci_write_msi_msg(irq, &msg);
+	irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
+			    domain->host_data, handle_edge_irq,
+			    NULL, NULL);
 
 	return 0;
 }
 
-static void mtk_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
+				     unsigned int virq, unsigned int nr_irqs)
 {
-	struct pci_dev *pdev = to_pci_dev(chip->dev);
-	struct irq_data *d = irq_get_irq_data(irq);
-	irq_hw_number_t hwirq = irqd_to_hwirq(d);
-	struct mtk_pcie_port *port;
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);
 
-	port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
-	if (!port)
-		return;
+	mutex_lock(&port->lock);
 
-	irq_dispose_mapping(irq);
-	mtk_pcie_msi_free(port, hwirq);
+	if (!test_bit(d->hwirq, port->msi_irq_in_use))
+		dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
+			d->hwirq);
+	else
+		__clear_bit(d->hwirq, port->msi_irq_in_use);
+
+	mutex_unlock(&port->lock);
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
 }
 
-static struct msi_controller mtk_pcie_msi_chip = {
-	.setup_irq = mtk_pcie_msi_setup_irq,
-	.teardown_irq = mtk_msi_teardown_irq,
+static const struct irq_domain_ops msi_domain_ops = {
+	.alloc	= mtk_pcie_irq_domain_alloc,
+	.free	= mtk_pcie_irq_domain_free,
 };
 
 static struct irq_chip mtk_msi_irq_chip = {
-	.name = "MTK PCIe MSI",
-	.irq_enable = pci_msi_unmask_irq,
-	.irq_disable = pci_msi_mask_irq,
-	.irq_mask = pci_msi_mask_irq,
-	.irq_unmask = pci_msi_unmask_irq,
+	.name		= "MTK PCIe MSI",
+	.irq_ack	= irq_chip_ack_parent,
+	.irq_mask	= pci_msi_mask_irq,
+	.irq_unmask	= pci_msi_unmask_irq,
 };
 
-static int mtk_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
-			    irq_hw_number_t hwirq)
+static struct msi_domain_info mtk_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_PCI_MSIX),
+	.chip	= &mtk_msi_irq_chip,
+};
+
+static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
 {
-	irq_set_chip_and_handler(irq, &mtk_msi_irq_chip, handle_simple_irq);
-	irq_set_chip_data(irq, domain->host_data);
+	struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
+
+	mutex_init(&port->lock);
+
+	port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
+						      &msi_domain_ops, port);
+	if (!port->inner_domain) {
+		dev_err(port->pcie->dev, "failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
+						     port->inner_domain);
+	if (!port->msi_domain) {
+		dev_err(port->pcie->dev, "failed to create MSI domain\n");
+		irq_domain_remove(port->inner_domain);
+		return -ENOMEM;
+	}
 
 	return 0;
 }
 
-static const struct irq_domain_ops msi_domain_ops = {
-	.map = mtk_pcie_msi_map,
-};
-
 static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
 {
 	u32 val;
@@ -559,6 +608,7 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
 {
 	struct device *dev = port->pcie->dev;
 	struct device_node *pcie_intc_node;
+	int ret;
 
 	/* Setup INTx */
 	pcie_intc_node = of_get_next_child(node, NULL);
@@ -575,27 +625,28 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
 	}
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		port->msi_domain = irq_domain_add_linear(node, MTK_MSI_IRQS_NUM,
-							 &msi_domain_ops,
-							 &mtk_pcie_msi_chip);
-		if (!port->msi_domain) {
-			dev_err(dev, "failed to create MSI IRQ domain\n");
-			return -ENODEV;
-		}
+		ret = mtk_pcie_allocate_msi_domains(port);
+		if (ret)
+			return ret;
+
 		mtk_pcie_enable_msi(port);
 	}
 
 	return 0;
 }
 
-static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
+static void mtk_pcie_intr_handler(struct irq_desc *desc)
 {
-	struct mtk_pcie_port *port = (struct mtk_pcie_port *)data;
+	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
+	struct irq_chip *irqchip = irq_desc_get_chip(desc);
 	unsigned long status;
 	u32 virq;
 	u32 bit = INTX_SHIFT;
 
-	while ((status = readl(port->base + PCIE_INT_STATUS)) & INTX_MASK) {
+	chained_irq_enter(irqchip, desc);
+
+	status = readl(port->base + PCIE_INT_STATUS);
+	if (status & INTX_MASK) {
 		for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
 			/* Clear the INTx */
 			writel(1 << bit, port->base + PCIE_INT_STATUS);
@@ -606,14 +657,12 @@ static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
 	}
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		while ((status = readl(port->base + PCIE_INT_STATUS)) & MSI_STATUS) {
+		if (status & MSI_STATUS){
 			unsigned long imsi_status;
 
 			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
 				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
-					/* Clear the MSI */
-					writel(1 << bit, port->base + PCIE_IMSI_STATUS);
-					virq = irq_find_mapping(port->msi_domain, bit);
+					virq = irq_find_mapping(port->inner_domain, bit);
 					generic_handle_irq(virq);
 				}
 			}
@@ -622,7 +671,9 @@ static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
 		}
 	}
 
-	return IRQ_HANDLED;
+	chained_irq_exit(irqchip, desc);
+
+	return;
 }
 
 static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
@@ -633,20 +684,15 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
 	struct platform_device *pdev = to_platform_device(dev);
 	int err, irq;
 
-	irq = platform_get_irq(pdev, port->slot);
-	err = devm_request_irq(dev, irq, mtk_pcie_intr_handler,
-			       IRQF_SHARED, "mtk-pcie", port);
-	if (err) {
-		dev_err(dev, "unable to request IRQ %d\n", irq);
-		return err;
-	}
-
 	err = mtk_pcie_init_irq_domain(port, node);
 	if (err) {
 		dev_err(dev, "failed to init PCIe IRQ domain\n");
 		return err;
 	}
 
+	irq = platform_get_irq(pdev, port->slot);
+	irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);
+
 	return 0;
 }
 
@@ -1080,8 +1126,6 @@ static int mtk_pcie_register_host(struct pci_host_bridge *host)
 	host->map_irq = of_irq_parse_and_map_pci;
 	host->swizzle_irq = pci_common_swizzle;
 	host->sysdata = pcie;
-	if (IS_ENABLED(CONFIG_PCI_MSI) && pcie->soc->has_msi)
-		host->msi = &mtk_pcie_msi_chip;
 
 	err = pci_scan_root_bus_bridge(host);
 	if (err < 0)
@@ -1142,8 +1186,14 @@ static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
 	.startup = mtk_pcie_startup_port,
 };
 
-static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
-	.has_msi = true,
+static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
+	.ops = &mtk_pcie_ops_v2,
+	.startup = mtk_pcie_startup_port_v2,
+	.setup_irq = mtk_pcie_setup_irq,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
+	.need_fix_class_id = true,
 	.ops = &mtk_pcie_ops_v2,
 	.startup = mtk_pcie_startup_port_v2,
 	.setup_irq = mtk_pcie_setup_irq,
@@ -1152,8 +1202,8 @@ static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
 static const struct of_device_id mtk_pcie_ids[] = {
 	{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
 	{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
-	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_v2 },
-	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_v2 },
+	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
+	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
 	{},
 };
 
diff --git a/drivers/pci/host/pcie-mobiveil.c b/drivers/pci/host/pcie-mobiveil.c
new file mode 100644
index 000000000000..4d6c20e47bed
--- /dev/null
+++ b/drivers/pci/host/pcie-mobiveil.c
@@ -0,0 +1,866 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* register offsets and bit positions */
+
+/*
+ * Translation tables are grouped into windows; each window's registers are
+ * grouped into blocks of 4 or 16 registers.
+ */
+#define PAB_REG_BLOCK_SIZE	16
+#define PAB_EXT_REG_BLOCK_SIZE	4
+
+#define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE))
+#define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
+
+#define LTSSM_STATUS		0x0404
+#define  LTSSM_STATUS_L0_MASK	0x3f
+#define  LTSSM_STATUS_L0	0x2d
+
+#define PAB_CTRL		0x0808
+#define  AMBA_PIO_ENABLE_SHIFT	0
+#define  PEX_PIO_ENABLE_SHIFT	1
+#define  PAGE_SEL_SHIFT	13
+#define  PAGE_SEL_MASK		0x3f
+#define  PAGE_LO_MASK		0x3ff
+#define  PAGE_SEL_EN		0xc00
+#define  PAGE_SEL_OFFSET_SHIFT	10
+
+#define PAB_AXI_PIO_CTRL	0x0840
+#define  APIO_EN_MASK		0xf
+
+#define PAB_PEX_PIO_CTRL	0x08c0
+#define  PIO_ENABLE_SHIFT	0
+
+#define PAB_INTP_AMBA_MISC_ENB		0x0b0c
+#define PAB_INTP_AMBA_MISC_STAT	0x0b1c
+#define  PAB_INTP_INTX_MASK		0x01e0
+#define  PAB_INTP_MSI_MASK		0x8
+
+#define PAB_AXI_AMAP_CTRL(win)	PAB_REG_ADDR(0x0ba0, win)
+#define  WIN_ENABLE_SHIFT	0
+#define  WIN_TYPE_SHIFT	1
+
+#define PAB_EXT_AXI_AMAP_SIZE(win)	PAB_EXT_REG_ADDR(0xbaf0, win)
+
+#define PAB_AXI_AMAP_AXI_WIN(win)	PAB_REG_ADDR(0x0ba4, win)
+#define  AXI_WINDOW_ALIGN_MASK		3
+
+#define PAB_AXI_AMAP_PEX_WIN_L(win)	PAB_REG_ADDR(0x0ba8, win)
+#define  PAB_BUS_SHIFT		24
+#define  PAB_DEVICE_SHIFT	19
+#define  PAB_FUNCTION_SHIFT	16
+
+#define PAB_AXI_AMAP_PEX_WIN_H(win)	PAB_REG_ADDR(0x0bac, win)
+#define PAB_INTP_AXI_PIO_CLASS		0x474
+
+#define PAB_PEX_AMAP_CTRL(win)	PAB_REG_ADDR(0x4ba0, win)
+#define  AMAP_CTRL_EN_SHIFT	0
+#define  AMAP_CTRL_TYPE_SHIFT	1
+
+#define PAB_EXT_PEX_AMAP_SIZEN(win)	PAB_EXT_REG_ADDR(0xbef0, win)
+#define PAB_PEX_AMAP_AXI_WIN(win)	PAB_REG_ADDR(0x4ba4, win)
+#define PAB_PEX_AMAP_PEX_WIN_L(win)	PAB_REG_ADDR(0x4ba8, win)
+#define PAB_PEX_AMAP_PEX_WIN_H(win)	PAB_REG_ADDR(0x4bac, win)
+
+/* starting offset of INTX bits in status register */
+#define PAB_INTX_START	5
+
+/* supported number of MSI interrupts */
+#define PCI_NUM_MSI	16
+
+/* MSI registers */
+#define MSI_BASE_LO_OFFSET	0x04
+#define MSI_BASE_HI_OFFSET	0x08
+#define MSI_SIZE_OFFSET	0x0c
+#define MSI_ENABLE_OFFSET	0x14
+#define MSI_STATUS_OFFSET	0x18
+#define MSI_DATA_OFFSET	0x20
+#define MSI_ADDR_L_OFFSET	0x24
+#define MSI_ADDR_H_OFFSET	0x28
+
+/* outbound and inbound window definitions */
+#define WIN_NUM_0		0
+#define WIN_NUM_1		1
+#define CFG_WINDOW_TYPE	0
+#define IO_WINDOW_TYPE		1
+#define MEM_WINDOW_TYPE	2
+#define IB_WIN_SIZE		(256 * 1024 * 1024 * 1024)
+#define MAX_PIO_WINDOWS	8
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES	10
+#define LINK_WAIT_MIN	90000
+#define LINK_WAIT_MAX	100000
+
+struct mobiveil_msi {			/* MSI information */
+	struct mutex lock;		/* protect bitmap variable */
+	struct irq_domain *msi_domain;
+	struct irq_domain *dev_domain;
+	phys_addr_t msi_pages_phys;
+	int num_of_vectors;
+	DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
+};
+
+struct mobiveil_pcie {
+	struct platform_device *pdev;
+	struct list_head resources;
+	void __iomem *config_axi_slave_base;	/* endpoint config base */
+	void __iomem *csr_axi_slave_base;	/* root port config base */
+	void __iomem *apb_csr_base;	/* MSI register base */
+	void __iomem *pcie_reg_base;	/* Physical PCIe Controller Base */
+	struct irq_domain *intx_domain;
+	raw_spinlock_t intx_mask_lock;
+	int irq;
+	int apio_wins;
+	int ppio_wins;
+	int ob_wins_configured;		/* configured outbound windows */
+	int ib_wins_configured;		/* configured inbound windows */
+	struct resource *ob_io_res;
+	char root_bus_nr;
+	struct mobiveil_msi msi;
+};
+
+static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value,
+		const u32 reg)
+{
+	writel_relaxed(value, pcie->csr_axi_slave_base + reg);
+}
+
+static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg)
+{
+	return readl_relaxed(pcie->csr_axi_slave_base + reg);
+}
+
+static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
+{
+	return (csr_readl(pcie, LTSSM_STATUS) &
+		LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
+}
+
+static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+	struct mobiveil_pcie *pcie = bus->sysdata;
+
+	/* Only one device down on each root port */
+	if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
+		return false;
+
+	/*
+	 * Do not read more than one device on the bus directly
+	 * attached to RC
+	 */
+	if ((bus->primary == pcie->root_bus_nr) && (devfn > 0))
+		return false;
+
+	return true;
+}
+
+/*
+ * mobiveil_pcie_map_bus - routine to get the configuration base of either
+ * root port or endpoint
+ */
+static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
+					unsigned int devfn, int where)
+{
+	struct mobiveil_pcie *pcie = bus->sysdata;
+
+	if (!mobiveil_pcie_valid_device(bus, devfn))
+		return NULL;
+
+	if (bus->number == pcie->root_bus_nr) {
+		/* RC config access */
+		return pcie->csr_axi_slave_base + where;
+	}
+
+	/*
+	 * EP config access (in Config/APIO space)
+	 * Program PEX Address base (31..16 bits) with appropriate value
+	 * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
+	 * Relies on pci_lock serialization
+	 */
+	csr_writel(pcie, bus->number << PAB_BUS_SHIFT |
+			PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
+			PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT,
+			PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+	return pcie->config_axi_slave_base + where;
+}
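A rough sketch of the BDF encoding written above, using arbitrary example values
(bus 1, device 2, function 0) and the PAB_*_SHIFT definitions from earlier in
this file:

	u32 bdf = 1 << PAB_BUS_SHIFT |		/* 0x01000000 */
		  2 << PAB_DEVICE_SHIFT |	/* 0x00100000 */
		  0 << PAB_FUNCTION_SHIFT;	/* 0x00000000 */

	csr_writel(pcie, bdf, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));	/* 0x01100000 */
	/* the register offset ("where") is then applied on the AXI side,
	 * relative to config_axi_slave_base */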
+
+static struct pci_ops mobiveil_pcie_ops = {
+	.map_bus = mobiveil_pcie_map_bus,
+	.read = pci_generic_config_read,
+	.write = pci_generic_config_write,
+};
+
+static void mobiveil_pcie_isr(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
+	struct device *dev = &pcie->pdev->dev;
+	struct mobiveil_msi *msi = &pcie->msi;
+	u32 msi_data, msi_addr_lo, msi_addr_hi;
+	u32 intr_status, msi_status;
+	unsigned long shifted_status;
+	u32 bit, virq, val, mask;
+
+	/*
+	 * The core provides a single interrupt for both INTx and MSI messages,
+	 * so we read both the INTx and MSI status registers here.
+	 */
+
+	chained_irq_enter(chip, desc);
+
+	/* read INTx status */
+	val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+	mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+	intr_status = val & mask;
+
+	/* Handle INTx */
+	if (intr_status & PAB_INTP_INTX_MASK) {
+		shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT) >>
+			PAB_INTX_START;
+		do {
+			for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
+				virq = irq_find_mapping(pcie->intx_domain,
+						bit + 1);
+				if (virq)
+					generic_handle_irq(virq);
+				else
+					dev_err_ratelimited(dev,
+						"unexpected IRQ, INT%d\n", bit);
+
+				/* clear interrupt */
+				csr_writel(pcie,
+					shifted_status << PAB_INTX_START,
+					PAB_INTP_AMBA_MISC_STAT);
+			}
+		} while ((shifted_status >> PAB_INTX_START) != 0);
+	}
+
+	/* read extra MSI status register */
+	msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
+
+	/* handle MSI interrupts */
+	while (msi_status & 1) {
+		msi_data = readl_relaxed(pcie->apb_csr_base
+				+ MSI_DATA_OFFSET);
+
+		/*
+		 * The MSI_STATUS_OFFSET register is cleared only after both
+		 * the MSI data and the MSI address have been popped from the
+		 * hardware FIFO, hence the two dummy reads that follow.
+		 */
+		msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
+				MSI_ADDR_L_OFFSET);
+		msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
+				MSI_ADDR_H_OFFSET);
+		dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
+				msi_data, msi_addr_hi, msi_addr_lo);
+
+		virq = irq_find_mapping(msi->dev_domain, msi_data);
+		if (virq)
+			generic_handle_irq(virq);
+
+		msi_status = readl_relaxed(pcie->apb_csr_base +
+				MSI_STATUS_OFFSET);
+	}
+
+	/* Clear the interrupt status */
+	csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
+	chained_irq_exit(chip, desc);
+}
+
+static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	struct platform_device *pdev = pcie->pdev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	const char *type;
+
+	type = of_get_property(node, "device_type", NULL);
+	if (!type || strcmp(type, "pci")) {
+		dev_err(dev, "invalid \"device_type\" %s\n", type);
+		return -EINVAL;
+	}
+
+	/* map config resource */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"config_axi_slave");
+	pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(pcie->config_axi_slave_base))
+		return PTR_ERR(pcie->config_axi_slave_base);
+	pcie->ob_io_res = res;
+
+	/* map csr resource */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"csr_axi_slave");
+	pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(pcie->csr_axi_slave_base))
+		return PTR_ERR(pcie->csr_axi_slave_base);
+	pcie->pcie_reg_base = res->start;
+
+	/* map MSI config resource */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
+	pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(pcie->apb_csr_base))
+		return PTR_ERR(pcie->apb_csr_base);
+
+	/* read the number of windows requested */
+	if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
+		pcie->apio_wins = MAX_PIO_WINDOWS;
+
+	if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
+		pcie->ppio_wins = MAX_PIO_WINDOWS;
+
+	pcie->irq = platform_get_irq(pdev, 0);
+	if (pcie->irq <= 0) {
+		dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
+		return -ENODEV;
+	}
+
+	irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
+
+	return 0;
+}
+
+/*
+ * select_paged_register - select the page for a paged root complex register
+ *
+ * The RC registers are paged: the upper 6 bits of the register offset are
+ * written to the pg_sel field of the PAB_CTRL register, and the lower 10
+ * bits, with PAGE_SEL_EN set, are then used as the offset of the access.
+ */
+static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset)
+{
+	int pab_ctrl_dw, pg_sel;
+
+	/* clear pg_sel field */
+	pab_ctrl_dw = csr_readl(pcie, PAB_CTRL);
+	pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT));
+
+	/* set pg_sel field */
+	pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK;
+	pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT));
+	csr_writel(pcie, pab_ctrl_dw, PAB_CTRL);
+}
+
+static void write_paged_register(struct mobiveil_pcie *pcie,
+		u32 val, u32 offset)
+{
+	u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
+
+	select_paged_register(pcie, offset);
+	csr_writel(pcie, val, off);
+}
+
+static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset)
+{
+	u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
+
+	select_paged_register(pcie, offset);
+	return csr_readl(pcie, off);
+}
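A worked illustration of the paging scheme described above, with an arbitrarily
chosen offset (0x4ba0, i.e. PAB_PEX_AMAP_CTRL(0)):

	u32 offset = 0x4ba0;					/* example only */
	u32 pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT)
			& PAGE_SEL_MASK;			/* 0x12, written to pg_sel */
	u32 off    = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;	/* 0x3a0 | 0xc00 = 0xfa0 */
	/* select_paged_register() stores pg_sel in PAB_CTRL, then the CSR
	 * access uses 'off' as the actual register offset */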
+
+static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
+		int pci_addr, u32 type, u64 size)
+{
+	int pio_ctrl_val;
+	int amap_ctrl_dw;
+	u64 size64 = ~(size - 1);
+
+	if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) {
+		dev_err(&pcie->pdev->dev,
+			"ERROR: max inbound windows reached !\n");
+		return;
+	}
+
+	pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL);
+	csr_writel(pcie,
+		pio_ctrl_val | (1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL);
+	amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num));
+	amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT));
+	amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT));
+
+	write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64),
+				PAB_PEX_AMAP_CTRL(win_num));
+
+	write_paged_register(pcie, upper_32_bits(size64),
+				PAB_EXT_PEX_AMAP_SIZEN(win_num));
+
+	write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num));
+	write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num));
+	write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num));
+}
+
+/*
+ * routine to program the outbound windows
+ */
+static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
+		u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size)
+{
+
+	u32 value, type;
+	u64 size64 = ~(size - 1);
+
+	if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) {
+		dev_err(&pcie->pdev->dev,
+			"ERROR: max outbound windows reached !\n");
+		return;
+	}
+
+	/*
+	 * program the Enable bit, the window type and the lower bits of the
+	 * encoded window size in the PAB_AXI_AMAP_CTRL register
+	 */
+	type = config_io_bit;
+	value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+	csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
+			lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num));
+
+	write_paged_register(pcie, upper_32_bits(size64),
+				PAB_EXT_AXI_AMAP_SIZE(win_num));
+
+	/*
+	 * program AXI window base with appropriate value in
+	 * PAB_AXI_AMAP_AXI_WIN0 register
+	 */
+	value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num));
+	csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK),
+			PAB_AXI_AMAP_AXI_WIN(win_num));
+
+	value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+	csr_writel(pcie, lower_32_bits(pci_addr),
+			PAB_AXI_AMAP_PEX_WIN_L(win_num));
+	csr_writel(pcie, upper_32_bits(pci_addr),
+			PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+	pcie->ob_wins_configured++;
+}
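The window size handed to program_ib_windows()/program_ob_windows() is encoded
as ~(size - 1) and split across two registers; a sketch with an arbitrary
256 MB window:

	u64 size   = 256 * 1024 * 1024;		/* example window size (power of two) */
	u64 size64 = ~(size - 1);		/* 0xfffffffff0000000 */

	/* lower_32_bits(size64) == 0xf0000000 is OR'ed into the AMAP_CTRL
	 * register, upper_32_bits(size64) == 0xffffffff goes into the
	 * extended size register */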
+
+static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
+{
+	int retries;
+
+	/* check if the link is up or not */
+	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+		if (mobiveil_pcie_link_up(pcie))
+			return 0;
+
+		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+	}
+	dev_err(&pcie->pdev->dev, "link never came up\n");
+	return -ETIMEDOUT;
+}
+
+static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
+{
+	phys_addr_t msg_addr = pcie->pcie_reg_base;
+	struct mobiveil_msi *msi = &pcie->msi;
+
+	pcie->msi.num_of_vectors = PCI_NUM_MSI;
+	msi->msi_pages_phys = (phys_addr_t)msg_addr;
+
+	writel_relaxed(lower_32_bits(msg_addr),
+		pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
+	writel_relaxed(upper_32_bits(msg_addr),
+		pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
+	writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
+	writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
+}
+
+static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+{
+	u32 value, pab_ctrl, type = 0;
+	int err;
+	struct resource_entry *win, *tmp;
+
+	err = mobiveil_bringup_link(pcie);
+	if (err) {
+		dev_info(&pcie->pdev->dev, "link bring-up failed\n");
+		return err;
+	}
+
+	/*
+	 * program the I/O, Memory and Bus Master Enable bits in the Command
+	 * register of the PAB config space
+	 */
+	value = csr_readl(pcie, PCI_COMMAND);
+	csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+		PCI_COMMAND_MASTER, PCI_COMMAND);
+
+	/*
+	 * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
+	 * register
+	 */
+	pab_ctrl = csr_readl(pcie, PAB_CTRL);
+	csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) |
+		(1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL);
+
+	csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+		PAB_INTP_AMBA_MISC_ENB);
+
+	/*
+	 * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
+	 * PAB_AXI_PIO_CTRL Register
+	 */
+	value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
+	csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL);
+
+	/*
+	 * program one outbound window for config accesses and a default
+	 * inbound window for all upstream traffic; the remaining outbound
+	 * windows are configured according to the "ranges" property in the
+	 * device tree
+	 */
+
+	/* config outbound translation window */
+	program_ob_windows(pcie, pcie->ob_wins_configured,
+			pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE,
+			resource_size(pcie->ob_io_res));
+
+	/* memory inbound translation window */
+	program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+
+	/* Get the I/O and memory ranges from DT */
+	resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
+		type = 0;
+		if (resource_type(win->res) == IORESOURCE_MEM)
+			type = MEM_WINDOW_TYPE;
+		if (resource_type(win->res) == IORESOURCE_IO)
+			type = IO_WINDOW_TYPE;
+		if (type) {
+			/* configure outbound translation window */
+			program_ob_windows(pcie, pcie->ob_wins_configured,
+				win->res->start, 0, type,
+				resource_size(win->res));
+		}
+	}
+
+	/* setup MSI hardware registers */
+	mobiveil_pcie_enable_msi(pcie);
+
+	return err;
+}
+
+static void mobiveil_mask_intx_irq(struct irq_data *data)
+{
+	struct irq_desc *desc = irq_to_desc(data->irq);
+	struct mobiveil_pcie *pcie;
+	unsigned long flags;
+	u32 mask, shifted_val;
+
+	pcie = irq_desc_get_chip_data(desc);
+	mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+	raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+	shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+	csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB);
+	raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+}
+
+static void mobiveil_unmask_intx_irq(struct irq_data *data)
+{
+	struct irq_desc *desc = irq_to_desc(data->irq);
+	struct mobiveil_pcie *pcie;
+	unsigned long flags;
+	u32 shifted_val, mask;
+
+	pcie = irq_desc_get_chip_data(desc);
+	mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+	raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+	shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+	csr_writel(pcie, (shifted_val | mask), PAB_INTP_AMBA_MISC_ENB);
+	raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+}
+
+static struct irq_chip intx_irq_chip = {
+	.name = "mobiveil_pcie:intx",
+	.irq_enable = mobiveil_unmask_intx_irq,
+	.irq_disable = mobiveil_mask_intx_irq,
+	.irq_mask = mobiveil_mask_intx_irq,
+	.irq_unmask = mobiveil_unmask_intx_irq,
+};
+
+/* routine to setup the INTx related data */
+static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+		irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	return 0;
+}
+
+/* INTx domain operations structure */
+static const struct irq_domain_ops intx_domain_ops = {
+	.map = mobiveil_pcie_intx_map,
+};
+
+static struct irq_chip mobiveil_msi_irq_chip = {
+	.name = "Mobiveil PCIe MSI",
+	.irq_mask = pci_msi_mask_irq,
+	.irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info mobiveil_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+	.chip	= &mobiveil_msi_irq_chip,
+};
+
+static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
+	phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
+
+	msg->address_lo = lower_32_bits(addr);
+	msg->address_hi = upper_32_bits(addr);
+	msg->data = data->hwirq;
+
+	dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
+		(int)data->hwirq, msg->address_hi, msg->address_lo);
+}
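As a concrete example of the MSI round trip set up by mobiveil_pcie_enable_msi()
and decoded in mobiveil_pcie_isr(), assuming an arbitrary hwirq of 5:

	phys_addr_t addr = pcie->pcie_reg_base + 5 * sizeof(int);	/* doorbell address */
	u32 data = 5;							/* message data */

	/* the endpoint's memory write of 'data' to 'addr' lands inside the
	 * 4096-byte MSI window programmed in mobiveil_pcie_enable_msi(); the
	 * ISR then pops the same value from MSI_DATA_OFFSET and maps it back
	 * to a virq via irq_find_mapping(msi->dev_domain, msi_data) */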
+
+static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
+		const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static struct irq_chip mobiveil_msi_bottom_irq_chip = {
+	.name			= "Mobiveil MSI",
+	.irq_compose_msi_msg	= mobiveil_compose_msi_msg,
+	.irq_set_affinity	= mobiveil_msi_set_affinity,
+};
+
+static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
+		unsigned int virq, unsigned int nr_irqs, void *args)
+{
+	struct mobiveil_pcie *pcie = domain->host_data;
+	struct mobiveil_msi *msi = &pcie->msi;
+	unsigned long bit;
+
+	WARN_ON(nr_irqs != 1);
+	mutex_lock(&msi->lock);
+
+	bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
+	if (bit >= msi->num_of_vectors) {
+		mutex_unlock(&msi->lock);
+		return -ENOSPC;
+	}
+
+	set_bit(bit, msi->msi_irq_in_use);
+
+	mutex_unlock(&msi->lock);
+
+	irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
+				domain->host_data, handle_level_irq,
+				NULL, NULL);
+	return 0;
+}
+
+static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
+		unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
+	struct mobiveil_msi *msi = &pcie->msi;
+
+	mutex_lock(&msi->lock);
+
+	if (!test_bit(d->hwirq, msi->msi_irq_in_use)) {
+		dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
+			d->hwirq);
+	} else {
+		__clear_bit(d->hwirq, msi->msi_irq_in_use);
+	}
+
+	mutex_unlock(&msi->lock);
+}
+static const struct irq_domain_ops msi_domain_ops = {
+	.alloc	= mobiveil_irq_msi_domain_alloc,
+	.free	= mobiveil_irq_msi_domain_free,
+};
+
+static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+	struct mobiveil_msi *msi = &pcie->msi;
+
+	mutex_init(&pcie->msi.lock);
+	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+						&msi_domain_ops, pcie);
+	if (!msi->dev_domain) {
+		dev_err(dev, "failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+				&mobiveil_msi_domain_info, msi->dev_domain);
+	if (!msi->msi_domain) {
+		dev_err(dev, "failed to create MSI domain\n");
+		irq_domain_remove(msi->dev_domain);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	struct device_node *node = dev->of_node;
+	int ret;
+
+	/* setup INTx */
+	pcie->intx_domain = irq_domain_add_linear(node,
+				PCI_NUM_INTX, &intx_domain_ops, pcie);
+
+	if (!pcie->intx_domain) {
+		dev_err(dev, "Failed to get an INTx IRQ domain\n");
+		return -ENODEV;
+	}
+
+	raw_spin_lock_init(&pcie->intx_mask_lock);
+
+	/* setup MSI */
+	ret = mobiveil_allocate_msi_domains(pcie);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mobiveil_pcie_probe(struct platform_device *pdev)
+{
+	struct mobiveil_pcie *pcie;
+	struct pci_bus *bus;
+	struct pci_bus *child;
+	struct pci_host_bridge *bridge;
+	struct device *dev = &pdev->dev;
+	resource_size_t iobase;
+	int ret;
+
+	/* allocate the PCIe port */
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!bridge)
+		return -ENODEV;
+
+	pcie = pci_host_bridge_priv(bridge);
+	if (!pcie)
+		return -ENOMEM;
+
+	pcie->pdev = pdev;
+
+	ret = mobiveil_pcie_parse_dt(pcie);
+	if (ret) {
+		dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
+		return ret;
+	}
+
+	INIT_LIST_HEAD(&pcie->resources);
+
+	/* parse the host bridge base addresses from the device tree file */
+	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+						    &pcie->resources, &iobase);
+	if (ret) {
+		dev_err(dev, "Getting bridge resources failed\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * configure all inbound and outbound windows and prepare the RC for
+	 * config access
+	 */
+	ret = mobiveil_host_init(pcie);
+	if (ret) {
+		dev_err(dev, "Failed to initialize host\n");
+		goto error;
+	}
+
+	/* fixup for PCIe class register */
+	csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);
+
+	/* initialize the IRQ domains */
+	ret = mobiveil_pcie_init_irq_domain(pcie);
+	if (ret) {
+		dev_err(dev, "Failed creating IRQ Domain\n");
+		goto error;
+	}
+
+	ret = devm_request_pci_bus_resources(dev, &pcie->resources);
+	if (ret)
+		goto error;
+
+	/* Initialize bridge */
+	list_splice_init(&pcie->resources, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = pcie;
+	bridge->busnr = pcie->root_bus_nr;
+	bridge->ops = &mobiveil_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	/* setup the kernel resources for the newly added PCIe root bus */
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret)
+		goto error;
+
+	bus = bridge->bus;
+
+	pci_assign_unassigned_bus_resources(bus);
+	list_for_each_entry(child, &bus->children, node)
+		pcie_bus_configure_settings(child);
+	pci_bus_add_devices(bus);
+
+	return 0;
+error:
+	pci_free_resource_list(&pcie->resources);
+	return ret;
+}
+
+static const struct of_device_id mobiveil_pcie_of_match[] = {
+	{.compatible = "mbvl,gpex40-pcie",},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
+
+static struct platform_driver mobiveil_pcie_driver = {
+	.probe = mobiveil_pcie_probe,
+	.driver = {
+			.name = "mobiveil-pcie",
+			.of_match_table = mobiveil_pcie_of_match,
+			.suppress_bind_attrs = true,
+		},
+};
+
+builtin_platform_driver(mobiveil_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
+MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 6ab28f29ac6a..874d75c9ee4a 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -11,6 +11,7 @@
  * Author: Phil Edworthy <phil.edworthy@renesas.com>
  */
 
+#include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -24,18 +25,23 @@
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
 #include <linux/pci.h>
+#include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
+#include "../pci.h"
+
 #define PCIECAR			0x000010
 #define PCIECCTLR		0x000018
-#define  CONFIG_SEND_ENABLE	(1 << 31)
+#define  CONFIG_SEND_ENABLE	BIT(31)
 #define  TYPE0			(0 << 8)
-#define  TYPE1			(1 << 8)
+#define  TYPE1			BIT(8)
 #define PCIECDR			0x000020
 #define PCIEMSR			0x000028
 #define PCIEINTXR		0x000400
+#define PCIEPHYSR		0x0007f0
+#define  PHYRDY			BIT(0)
 #define PCIEMSITXR		0x000840
 
 /* Transfer control */
@@ -44,7 +50,7 @@
 #define PCIETSTR		0x02004
 #define  DATA_LINK_ACTIVE	1
 #define PCIEERRFR		0x02020
-#define  UNSUPPORTED_REQUEST	(1 << 4)
+#define  UNSUPPORTED_REQUEST	BIT(4)
 #define PCIEMSIFR		0x02044
 #define PCIEMSIALR		0x02048
 #define  MSIFE			1
@@ -57,17 +63,17 @@
 /* local address reg & mask */
 #define PCIELAR(x)		(0x02200 + ((x) * 0x20))
 #define PCIELAMR(x)		(0x02208 + ((x) * 0x20))
-#define  LAM_PREFETCH		(1 << 3)
-#define  LAM_64BIT		(1 << 2)
-#define  LAR_ENABLE		(1 << 1)
+#define  LAM_PREFETCH		BIT(3)
+#define  LAM_64BIT		BIT(2)
+#define  LAR_ENABLE		BIT(1)
 
 /* PCIe address reg & mask */
 #define PCIEPALR(x)		(0x03400 + ((x) * 0x20))
 #define PCIEPAUR(x)		(0x03404 + ((x) * 0x20))
 #define PCIEPAMR(x)		(0x03408 + ((x) * 0x20))
 #define PCIEPTCTLR(x)		(0x0340c + ((x) * 0x20))
-#define  PAR_ENABLE		(1 << 31)
-#define  IO_SPACE		(1 << 8)
+#define  PAR_ENABLE		BIT(31)
+#define  IO_SPACE		BIT(8)
 
 /* Configuration */
 #define PCICONF(x)		(0x010000 + ((x) * 0x4))
@@ -79,47 +85,46 @@
 #define IDSETR1			0x011004
 #define TLCTLR			0x011048
 #define MACSR			0x011054
-#define  SPCHGFIN		(1 << 4)
-#define  SPCHGFAIL		(1 << 6)
-#define  SPCHGSUC		(1 << 7)
+#define  SPCHGFIN		BIT(4)
+#define  SPCHGFAIL		BIT(6)
+#define  SPCHGSUC		BIT(7)
 #define  LINK_SPEED		(0xf << 16)
 #define  LINK_SPEED_2_5GTS	(1 << 16)
 #define  LINK_SPEED_5_0GTS	(2 << 16)
 #define MACCTLR			0x011058
-#define  SPEED_CHANGE		(1 << 24)
-#define  SCRAMBLE_DISABLE	(1 << 27)
+#define  SPEED_CHANGE		BIT(24)
+#define  SCRAMBLE_DISABLE	BIT(27)
 #define MACS2R			0x011078
 #define MACCGSPSETR		0x011084
-#define  SPCNGRSN		(1 << 31)
+#define  SPCNGRSN		BIT(31)
 
 /* R-Car H1 PHY */
 #define H1_PCIEPHYADRR		0x04000c
-#define  WRITE_CMD		(1 << 16)
-#define  PHY_ACK		(1 << 24)
+#define  WRITE_CMD		BIT(16)
+#define  PHY_ACK		BIT(24)
 #define  RATE_POS		12
 #define  LANE_POS		8
 #define  ADR_POS		0
 #define H1_PCIEPHYDOUTR		0x040014
-#define H1_PCIEPHYSR		0x040018
 
 /* R-Car Gen2 PHY */
 #define GEN2_PCIEPHYADDR	0x780
 #define GEN2_PCIEPHYDATA	0x784
 #define GEN2_PCIEPHYCTRL	0x78c
 
-#define INT_PCI_MSI_NR	32
+#define INT_PCI_MSI_NR		32
 
-#define RCONF(x)	(PCICONF(0)+(x))
-#define RPMCAP(x)	(PMCAP(0)+(x))
-#define REXPCAP(x)	(EXPCAP(0)+(x))
-#define RVCCAP(x)	(VCCAP(0)+(x))
+#define RCONF(x)		(PCICONF(0) + (x))
+#define RPMCAP(x)		(PMCAP(0) + (x))
+#define REXPCAP(x)		(EXPCAP(0) + (x))
+#define RVCCAP(x)		(VCCAP(0) + (x))
 
-#define  PCIE_CONF_BUS(b)	(((b) & 0xff) << 24)
-#define  PCIE_CONF_DEV(d)	(((d) & 0x1f) << 19)
-#define  PCIE_CONF_FUNC(f)	(((f) & 0x7) << 16)
+#define PCIE_CONF_BUS(b)	(((b) & 0xff) << 24)
+#define PCIE_CONF_DEV(d)	(((d) & 0x1f) << 19)
+#define PCIE_CONF_FUNC(f)	(((f) & 0x7) << 16)
 
-#define RCAR_PCI_MAX_RESOURCES 4
-#define MAX_NR_INBOUND_MAPS 6
+#define RCAR_PCI_MAX_RESOURCES	4
+#define MAX_NR_INBOUND_MAPS	6
 
 struct rcar_msi {
 	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
@@ -139,10 +144,10 @@ static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
 /* Structure representing the PCIe interface */
 struct rcar_pcie {
 	struct device		*dev;
+	struct phy		*phy;
 	void __iomem		*base;
 	struct list_head	resources;
 	int			root_bus_nr;
-	struct clk		*clk;
 	struct clk		*bus_clk;
 	struct			rcar_msi msi;
 };
@@ -527,12 +532,12 @@ static void phy_write_reg(struct rcar_pcie *pcie,
 	phy_wait_for_ack(pcie);
 }
 
-static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
+static int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie)
 {
 	unsigned int timeout = 10;
 
 	while (timeout--) {
-		if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+		if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY)
 			return 0;
 
 		msleep(5);
@@ -541,6 +546,21 @@ static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
 	return -ETIMEDOUT;
 }
 
+static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
+{
+	unsigned int timeout = 10000;
+
+	while (timeout--) {
+		if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+			return 0;
+
+		udelay(5);
+		cpu_relax();
+	}
+
+	return -ETIMEDOUT;
+}
+
 static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 {
 	int err;
@@ -551,6 +571,10 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 	/* Set mode */
 	rcar_pci_write_reg(pcie, 1, PCIEMSR);
 
+	err = rcar_pcie_wait_for_phyrdy(pcie);
+	if (err)
+		return err;
+
 	/*
 	 * Initial header for port config space is type 1, set the device
 	 * class to match. Hardware takes care of propagating the IDSETR
@@ -605,10 +629,8 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 	return 0;
 }
 
-static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
+static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie)
 {
-	unsigned int timeout = 10;
-
 	/* Initialize the phy */
 	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
 	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
@@ -627,17 +649,10 @@ static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
 	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
 	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
 
-	while (timeout--) {
-		if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
-			return rcar_pcie_hw_init(pcie);
-
-		msleep(5);
-	}
-
-	return -ETIMEDOUT;
+	return 0;
 }
 
-static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie)
+static int rcar_pcie_phy_init_gen2(struct rcar_pcie *pcie)
 {
 	/*
 	 * These settings come from the R-Car Series, 2nd Generation User's
@@ -654,7 +669,18 @@ static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie)
 	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
 	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
 
-	return rcar_pcie_hw_init(pcie);
+	return 0;
+}
+
+static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
+{
+	int err;
+
+	err = phy_init(pcie->phy);
+	if (err)
+		return err;
+
+	return phy_power_on(pcie->phy);
 }
 
 static int rcar_msi_alloc(struct rcar_msi *chip)
@@ -842,6 +868,20 @@ static const struct irq_domain_ops msi_domain_ops = {
 	.map = rcar_msi_map,
 };
 
+static void rcar_pcie_unmap_msi(struct rcar_pcie *pcie)
+{
+	struct rcar_msi *msi = &pcie->msi;
+	int i, irq;
+
+	for (i = 0; i < INT_PCI_MSI_NR; i++) {
+		irq = irq_find_mapping(msi->domain, i);
+		if (irq > 0)
+			irq_dispose_mapping(irq);
+	}
+
+	irq_domain_remove(msi->domain);
+}
+
 static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
 {
 	struct device *dev = pcie->dev;
@@ -896,16 +936,35 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
 	return 0;
 
 err:
-	irq_domain_remove(msi->domain);
+	rcar_pcie_unmap_msi(pcie);
 	return err;
 }
 
+static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie)
+{
+	struct rcar_msi *msi = &pcie->msi;
+
+	/* Disable all MSI interrupts */
+	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
+
+	/* Disable address decoding of the MSI interrupt, MSIFE */
+	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
+
+	free_pages(msi->pages, 0);
+
+	rcar_pcie_unmap_msi(pcie);
+}
+
 static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
 {
 	struct device *dev = pcie->dev;
 	struct resource res;
 	int err, i;
 
+	pcie->phy = devm_phy_optional_get(dev, "pcie");
+	if (IS_ERR(pcie->phy))
+		return PTR_ERR(pcie->phy);
+
 	err = of_address_to_resource(dev->of_node, 0, &res);
 	if (err)
 		return err;
@@ -914,30 +973,17 @@ static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
 	if (IS_ERR(pcie->base))
 		return PTR_ERR(pcie->base);
 
-	pcie->clk = devm_clk_get(dev, "pcie");
-	if (IS_ERR(pcie->clk)) {
-		dev_err(dev, "cannot get platform clock\n");
-		return PTR_ERR(pcie->clk);
-	}
-	err = clk_prepare_enable(pcie->clk);
-	if (err)
-		return err;
-
 	pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
 	if (IS_ERR(pcie->bus_clk)) {
 		dev_err(dev, "cannot get pcie bus clock\n");
-		err = PTR_ERR(pcie->bus_clk);
-		goto fail_clk;
+		return PTR_ERR(pcie->bus_clk);
 	}
-	err = clk_prepare_enable(pcie->bus_clk);
-	if (err)
-		goto fail_clk;
 
 	i = irq_of_parse_and_map(dev->of_node, 0);
 	if (!i) {
 		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 		err = -ENOENT;
-		goto err_map_reg;
+		goto err_irq1;
 	}
 	pcie->msi.irq1 = i;
 
@@ -945,17 +991,15 @@ static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
 	if (!i) {
 		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 		err = -ENOENT;
-		goto err_map_reg;
+		goto err_irq2;
 	}
 	pcie->msi.irq2 = i;
 
 	return 0;
 
-err_map_reg:
-	clk_disable_unprepare(pcie->bus_clk);
-fail_clk:
-	clk_disable_unprepare(pcie->clk);
-
+err_irq2:
+	irq_dispose_mapping(pcie->msi.irq1);
+err_irq1:
 	return err;
 }
 
@@ -1051,63 +1095,28 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
 }
 
 static const struct of_device_id rcar_pcie_of_match[] = {
-	{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
+	{ .compatible = "renesas,pcie-r8a7779",
+	  .data = rcar_pcie_phy_init_h1 },
 	{ .compatible = "renesas,pcie-r8a7790",
-	  .data = rcar_pcie_hw_init_gen2 },
+	  .data = rcar_pcie_phy_init_gen2 },
 	{ .compatible = "renesas,pcie-r8a7791",
-	  .data = rcar_pcie_hw_init_gen2 },
+	  .data = rcar_pcie_phy_init_gen2 },
 	{ .compatible = "renesas,pcie-rcar-gen2",
-	  .data = rcar_pcie_hw_init_gen2 },
-	{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
-	{ .compatible = "renesas,pcie-rcar-gen3", .data = rcar_pcie_hw_init },
+	  .data = rcar_pcie_phy_init_gen2 },
+	{ .compatible = "renesas,pcie-r8a7795",
+	  .data = rcar_pcie_phy_init_gen3 },
+	{ .compatible = "renesas,pcie-rcar-gen3",
+	  .data = rcar_pcie_phy_init_gen3 },
 	{},
 };
 
-static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
-{
-	int err;
-	struct device *dev = pci->dev;
-	struct device_node *np = dev->of_node;
-	resource_size_t iobase;
-	struct resource_entry *win, *tmp;
-
-	err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
-					       &iobase);
-	if (err)
-		return err;
-
-	err = devm_request_pci_bus_resources(dev, &pci->resources);
-	if (err)
-		goto out_release_res;
-
-	resource_list_for_each_entry_safe(win, tmp, &pci->resources) {
-		struct resource *res = win->res;
-
-		if (resource_type(res) == IORESOURCE_IO) {
-			err = pci_remap_iospace(res, iobase);
-			if (err) {
-				dev_warn(dev, "error %d: failed to map resource %pR\n",
-					 err, res);
-
-				resource_list_destroy_entry(win);
-			}
-		}
-	}
-
-	return 0;
-
-out_release_res:
-	pci_free_resource_list(&pci->resources);
-	return err;
-}
-
 static int rcar_pcie_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct rcar_pcie *pcie;
 	unsigned int data;
 	int err;
-	int (*hw_init_fn)(struct rcar_pcie *);
+	int (*phy_init_fn)(struct rcar_pcie *);
 	struct pci_host_bridge *bridge;
 
 	bridge = pci_alloc_host_bridge(sizeof(*pcie));
@@ -1118,36 +1127,45 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 
 	pcie->dev = dev;
 
-	INIT_LIST_HEAD(&pcie->resources);
-
-	err = rcar_pcie_parse_request_of_pci_ranges(pcie);
+	err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
 	if (err)
 		goto err_free_bridge;
 
+	pm_runtime_enable(pcie->dev);
+	err = pm_runtime_get_sync(pcie->dev);
+	if (err < 0) {
+		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
+		goto err_pm_disable;
+	}
+
 	err = rcar_pcie_get_resources(pcie);
 	if (err < 0) {
 		dev_err(dev, "failed to request resources: %d\n", err);
-		goto err_free_resource_list;
+		goto err_pm_put;
+	}
+
+	err = clk_prepare_enable(pcie->bus_clk);
+	if (err) {
+		dev_err(dev, "failed to enable bus clock: %d\n", err);
+		goto err_unmap_msi_irqs;
 	}
 
 	err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
 	if (err)
-		goto err_free_resource_list;
+		goto err_clk_disable;
 
-	pm_runtime_enable(dev);
-	err = pm_runtime_get_sync(dev);
-	if (err < 0) {
-		dev_err(dev, "pm_runtime_get_sync failed\n");
-		goto err_pm_disable;
+	phy_init_fn = of_device_get_match_data(dev);
+	err = phy_init_fn(pcie);
+	if (err) {
+		dev_err(dev, "failed to init PCIe PHY\n");
+		goto err_clk_disable;
 	}
 
 	/* Failure to get a link might just be that no cards are inserted */
-	hw_init_fn = of_device_get_match_data(dev);
-	err = hw_init_fn(pcie);
-	if (err) {
+	if (rcar_pcie_hw_init(pcie)) {
 		dev_info(dev, "PCIe link down\n");
 		err = -ENODEV;
-		goto err_pm_put;
+		goto err_clk_disable;
 	}
 
 	data = rcar_pci_read_reg(pcie, MACSR);
@@ -1159,24 +1177,34 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 			dev_err(dev,
 				"failed to enable MSI support: %d\n",
 				err);
-			goto err_pm_put;
+			goto err_clk_disable;
 		}
 	}
 
 	err = rcar_pcie_enable(pcie);
 	if (err)
-		goto err_pm_put;
+		goto err_msi_teardown;
 
 	return 0;
 
+err_msi_teardown:
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		rcar_pcie_teardown_msi(pcie);
+
+err_clk_disable:
+	clk_disable_unprepare(pcie->bus_clk);
+
+err_unmap_msi_irqs:
+	irq_dispose_mapping(pcie->msi.irq2);
+	irq_dispose_mapping(pcie->msi.irq1);
+
 err_pm_put:
 	pm_runtime_put(dev);
 
 err_pm_disable:
 	pm_runtime_disable(dev);
-
-err_free_resource_list:
 	pci_free_resource_list(&pcie->resources);
+
 err_free_bridge:
 	pci_free_host_bridge(bridge);
 
diff --git a/drivers/pci/host/pcie-rockchip-ep.c b/drivers/pci/host/pcie-rockchip-ep.c
new file mode 100644
index 000000000000..fc267a49a932
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip-ep.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe endpoint controller driver
+ *
+ * Copyright (c) 2018 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *         Simon Xue <xxm@rock-chips.com>
+ */
+
+#include <linux/configfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pci-epc.h>
+#include <linux/platform_device.h>
+#include <linux/pci-epf.h>
+#include <linux/sizes.h>
+
+#include "pcie-rockchip.h"
+
+/**
+ * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
+ * @rockchip: Rockchip PCIe controller
+ * @epc: PCI endpoint controller (EPC) handle
+ * @max_regions: maximum number of regions supported by hardware
+ * @ob_region_map: bitmask of mapped outbound regions
+ * @ob_addr: base addresses in the AXI bus where the outbound regions start
+ * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ *		   dedicated outbound region is mapped.
+ * @irq_cpu_addr: base address in the CPU space where a write access triggers
+ *		  the sending of a memory write (MSI) / normal message (legacy
+ *		  IRQ) TLP through the PCIe bus.
+ * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ *		  dedicated outbound region.
+ * @irq_pci_fn: the latest PCI function that has updated the mapping of
+ *		the MSI/legacy IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted legacy IRQs.
+ */
+struct rockchip_pcie_ep {
+	struct rockchip_pcie	rockchip;
+	struct pci_epc		*epc;
+	u32			max_regions;
+	unsigned long		ob_region_map;
+	phys_addr_t		*ob_addr;
+	phys_addr_t		irq_phys_addr;
+	void __iomem		*irq_cpu_addr;
+	u64			irq_pci_addr;
+	u8			irq_pci_fn;
+	u8			irq_pending;
+};
+
+static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
+					  u32 region)
+{
+	rockchip_pcie_write(rockchip, 0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
+	rockchip_pcie_write(rockchip, 0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
+	rockchip_pcie_write(rockchip, 0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
+	rockchip_pcie_write(rockchip, 0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
+	rockchip_pcie_write(rockchip, 0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(region));
+	rockchip_pcie_write(rockchip, 0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(region));
+}
+
+static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
+					 u32 r, u32 type, u64 cpu_addr,
+					 u64 pci_addr, size_t size)
+{
+	u64 sz = 1ULL << fls64(size - 1);
+	int num_pass_bits = ilog2(sz);
+	u32 addr0, addr1, desc0, desc1;
+	bool is_nor_msg = (type == AXI_WRAPPER_NOR_MSG);
+
+	/* The minimal region size is 1MB */
+	if (num_pass_bits < 8)
+		num_pass_bits = 8;
+
+	cpu_addr -= rockchip->mem_res->start;
+	addr0 = ((is_nor_msg ? 0x10 : (num_pass_bits - 1)) &
+		PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+		(lower_32_bits(cpu_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+	addr1 = upper_32_bits(is_nor_msg ? cpu_addr : pci_addr);
+	desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | type;
+	desc1 = 0;
+
+	if (is_nor_msg) {
+		rockchip_pcie_write(rockchip, 0,
+				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+		rockchip_pcie_write(rockchip, 0,
+				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+		rockchip_pcie_write(rockchip, desc0,
+				    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+		rockchip_pcie_write(rockchip, desc1,
+				    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+	} else {
+		/* PCI bus address region */
+		rockchip_pcie_write(rockchip, addr0,
+				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+		rockchip_pcie_write(rockchip, addr1,
+				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+		rockchip_pcie_write(rockchip, desc0,
+				    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+		rockchip_pcie_write(rockchip, desc1,
+				    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+
+		addr0 =
+		    ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+		    (lower_32_bits(cpu_addr) &
+		     PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+		addr1 = upper_32_bits(cpu_addr);
+	}
+
+	/* CPU bus address region */
+	rockchip_pcie_write(rockchip, addr0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r));
+	rockchip_pcie_write(rockchip, addr1,
+			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
+}
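A sketch of the pass-bits arithmetic above, for an arbitrary 64 KB mapping:

	size_t size = SZ_64K;			/* example mapping size */
	u64 sz = 1ULL << fls64(size - 1);	/* rounded up to 0x10000 */
	int num_pass_bits = ilog2(sz);		/* 16 */

	/* ADDR0 then carries (num_pass_bits - 1) == 15 in its low bits: per the
	 * field name, the count of low address bits passed through the window */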
+
+static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+					 struct pci_epf_header *hdr)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+
+	/* All functions share the same vendor ID with function 0 */
+	if (fn == 0) {
+		u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
+			       (hdr->subsys_vendor_id & GENMASK(15, 0)) << 16;
+
+		rockchip_pcie_write(rockchip, vid_regs,
+				    PCIE_CORE_CONFIG_VENDOR);
+	}
+
+	rockchip_pcie_write(rockchip, hdr->deviceid << 16,
+			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_VENDOR_ID);
+
+	rockchip_pcie_write(rockchip,
+			    hdr->revid |
+			    hdr->progif_code << 8 |
+			    hdr->subclass_code << 16 |
+			    hdr->baseclass_code << 24,
+			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
+	rockchip_pcie_write(rockchip, hdr->cache_line_size,
+			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+			    PCI_CACHE_LINE_SIZE);
+	rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
+			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+			    PCI_SUBSYSTEM_VENDOR_ID);
+	rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
+			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+			    PCI_INTERRUPT_LINE);
+
+	return 0;
+}
+
+static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+				    struct pci_epf_bar *epf_bar)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	dma_addr_t bar_phys = epf_bar->phys_addr;
+	enum pci_barno bar = epf_bar->barno;
+	int flags = epf_bar->flags;
+	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
+	u64 sz;
+
+	/* BAR size is 2^(aperture + 7) */
+	sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);
+
+	/*
+	 * roundup_pow_of_two() returns an unsigned long, which is not suited
+	 * for 64bit values.
+	 */
+	sz = 1ULL << fls64(sz - 1);
+	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
+
+	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+		ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
+	} else {
+		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
+		bool is_64bits = sz > SZ_2G;
+
+		if (is_64bits && (bar & 1))
+			return -EINVAL;
+
+		if (is_64bits && is_prefetch)
+			ctrl =
+			    ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
+		else if (is_prefetch)
+			ctrl =
+			    ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
+		else if (is_64bits)
+			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
+		else
+			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
+	}
+
+	if (bar < BAR_4) {
+		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
+		b = bar;
+	} else {
+		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
+		b = bar - BAR_4;
+	}
+
+	addr0 = lower_32_bits(bar_phys);
+	addr1 = upper_32_bits(bar_phys);
+
+	cfg = rockchip_pcie_read(rockchip, reg);
+	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+	cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+		ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+
+	rockchip_pcie_write(rockchip, cfg, reg);
+	rockchip_pcie_write(rockchip, addr0,
+			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
+	rockchip_pcie_write(rockchip, addr1,
+			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
+
+	return 0;
+}
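For example (illustrative values only), a 1 MB 32-bit non-prefetchable memory
BAR ends up encoded by the code above as:

	u64 sz = SZ_1M;				/* already a power of two */
	u32 aperture = ilog2(sz) - 7;		/* 20 - 7 = 13, i.e. size == 2^(13 + 7) */
	u32 ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;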
+
+static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+				       struct pci_epf_bar *epf_bar)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	u32 reg, cfg, b, ctrl;
+	enum pci_barno bar = epf_bar->barno;
+
+	if (bar < BAR_4) {
+		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
+		b = bar;
+	} else {
+		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
+		b = bar - BAR_4;
+	}
+
+	ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
+	cfg = rockchip_pcie_read(rockchip, reg);
+	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+	cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+
+	rockchip_pcie_write(rockchip, cfg, reg);
+	rockchip_pcie_write(rockchip, 0x0,
+			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
+	rockchip_pcie_write(rockchip, 0x0,
+			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
+}
+
+static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+				     phys_addr_t addr, u64 pci_addr,
+				     size_t size)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *pcie = &ep->rockchip;
+	u32 r;
+
+	r = find_first_zero_bit(&ep->ob_region_map,
+				sizeof(ep->ob_region_map) * BITS_PER_LONG);
+	/*
+	 * Region 0 is reserved for configuration space and shouldn't
+	 * be used elsewhere per TRM, so leave it out.
+	 */
+	if (r >= ep->max_regions - 1) {
+		dev_err(&epc->dev, "no free outbound region\n");
+		return -EINVAL;
+	}
+
+	rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, AXI_WRAPPER_MEM_WRITE, addr,
+				     pci_addr, size);
+
+	set_bit(r, &ep->ob_region_map);
+	ep->ob_addr[r] = addr;
+
+	return 0;
+}
+
+static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+					phys_addr_t addr)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	u32 r;
+
+	for (r = 0; r < ep->max_regions - 1; r++)
+		if (ep->ob_addr[r] == addr)
+			break;
+
+	/*
+	 * Region 0 is reserved for configuration space and shouldn't
+	 * be used elsewhere per TRM, so leave it out.
+	 */
+	if (r == ep->max_regions - 1)
+		return;
+
+	rockchip_pcie_clear_ep_ob_atu(rockchip, r);
+
+	ep->ob_addr[r] = 0;
+	clear_bit(r, &ep->ob_region_map);
+}
+
+static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
+				    u8 multi_msg_cap)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	u16 flags;
+
+	flags = rockchip_pcie_read(rockchip,
+				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
+	flags |=
+	   ((multi_msg_cap << 1) <<  ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
+	   PCI_MSI_FLAGS_64BIT;
+	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
+	rockchip_pcie_write(rockchip, flags,
+			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+			    ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+	return 0;
+}
+
+static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	u16 flags;
+
+	flags = rockchip_pcie_read(rockchip,
+				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
+		return -EINVAL;
+
+	return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+			ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+}
+
+static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
+					 u8 intx, bool is_asserted)
+{
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	u32 r = ep->max_regions - 1;
+	u32 offset;
+	u16 status;
+	u8 msg_code;
+
+	if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR ||
+		     ep->irq_pci_fn != fn)) {
+		rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
+					     AXI_WRAPPER_NOR_MSG,
+					     ep->irq_phys_addr, 0, 0);
+		ep->irq_pci_addr = ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR;
+		ep->irq_pci_fn = fn;
+	}
+
+	intx &= 3;
+	if (is_asserted) {
+		ep->irq_pending |= BIT(intx);
+		msg_code = ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA + intx;
+	} else {
+		ep->irq_pending &= ~BIT(intx);
+		msg_code = ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA + intx;
+	}
+
+	status = rockchip_pcie_read(rockchip,
+				    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				    ROCKCHIP_PCIE_EP_CMD_STATUS);
+	status &= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
+
+	if ((status != 0) ^ (ep->irq_pending != 0)) {
+		status ^= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
+		rockchip_pcie_write(rockchip, status,
+				    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				    ROCKCHIP_PCIE_EP_CMD_STATUS);
+	}
+
+	offset =
+	   ROCKCHIP_PCIE_MSG_ROUTING(ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX) |
+	   ROCKCHIP_PCIE_MSG_CODE(msg_code) | ROCKCHIP_PCIE_MSG_NO_DATA;
+	writel(0, ep->irq_cpu_addr + offset);
+}
+
+static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
+					    u8 intx)
+{
+	u16 cmd;
+
+	cmd = rockchip_pcie_read(&ep->rockchip,
+				 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				 ROCKCHIP_PCIE_EP_CMD_STATUS);
+
+	if (cmd & PCI_COMMAND_INTX_DISABLE)
+		return -EINVAL;
+
+	/*
+	 * Per the TRM, some delay is needed between asserting and deasserting
+	 * INTx; it is only vaguely specified as a number of AHB bus clock
+	 * cycles, so a generous 1ms is used here.
+	 */
+	rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
+	mdelay(1);
+	rockchip_pcie_ep_assert_intx(ep, fn, intx, false);
+	return 0;
+}
+
+static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
+					 u8 interrupt_num)
+{
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	u16 flags, mme, data, data_mask;
+	u8 msi_count;
+	u64 pci_addr, pci_addr_mask = 0xff;
+
+	/* Check MSI enable bit */
+	flags = rockchip_pcie_read(&ep->rockchip,
+				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
+		return -EINVAL;
+
+	/* Get MSI numbers from MME */
+	mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+			ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+	msi_count = 1 << mme;
+	if (!interrupt_num || interrupt_num > msi_count)
+		return -EINVAL;
+
+	/* Set MSI private data */
+	data_mask = msi_count - 1;
+	data = rockchip_pcie_read(rockchip,
+				  ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				  ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+				  PCI_MSI_DATA_64);
+	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
+
+	/* Get MSI PCI address */
+	pci_addr = rockchip_pcie_read(rockchip,
+				      ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				      ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+				      PCI_MSI_ADDRESS_HI);
+	pci_addr <<= 32;
+	pci_addr |= rockchip_pcie_read(rockchip,
+				       ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				       ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+				       PCI_MSI_ADDRESS_LO);
+	pci_addr &= GENMASK_ULL(63, 2);
+
+	/* Set the outbound region if needed. */
+	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
+		     ep->irq_pci_fn != fn)) {
+		rockchip_pcie_prog_ep_ob_atu(rockchip, fn, ep->max_regions - 1,
+					     AXI_WRAPPER_MEM_WRITE,
+					     ep->irq_phys_addr,
+					     pci_addr & ~pci_addr_mask,
+					     pci_addr_mask + 1);
+		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
+		ep->irq_pci_fn = fn;
+	}
+
+	writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
+	return 0;
+}
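To illustrate the MME handling above, assume the host granted four vectors
(MME == 2) and the caller raises interrupt_num 3:

	u16 mme = 2;				/* from the MSI control register */
	u8 msi_count = 1 << mme;		/* 4 vectors */
	u16 data_mask = msi_count - 1;		/* 0x3 */

	/* the value written to the doorbell is (msi_data & ~0x3) | (3 - 1):
	 * the host's base message data with its low bits replaced by the
	 * zero-based vector index */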
+
+static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+				      enum pci_epc_irq_type type,
+				      u8 interrupt_num)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0);
+	case PCI_EPC_IRQ_MSI:
+		return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int rockchip_pcie_ep_start(struct pci_epc *epc)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	struct pci_epf *epf;
+	u32 cfg;
+
+	cfg = BIT(0);
+	list_for_each_entry(epf, &epc->pci_epf, list)
+		cfg |= BIT(epf->func_no);
+
+	rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
+
+	list_for_each_entry(epf, &epc->pci_epf, list)
+		pci_epf_linkup(epf);
+
+	return 0;
+}
+
+static const struct pci_epc_ops rockchip_pcie_epc_ops = {
+	.write_header	= rockchip_pcie_ep_write_header,
+	.set_bar	= rockchip_pcie_ep_set_bar,
+	.clear_bar	= rockchip_pcie_ep_clear_bar,
+	.map_addr	= rockchip_pcie_ep_map_addr,
+	.unmap_addr	= rockchip_pcie_ep_unmap_addr,
+	.set_msi	= rockchip_pcie_ep_set_msi,
+	.get_msi	= rockchip_pcie_ep_get_msi,
+	.raise_irq	= rockchip_pcie_ep_raise_irq,
+	.start		= rockchip_pcie_ep_start,
+};
+
+static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
+				     struct rockchip_pcie_ep *ep)
+{
+	struct device *dev = rockchip->dev;
+	int err;
+
+	err = rockchip_pcie_parse_dt(rockchip);
+	if (err)
+		return err;
+
+	err = rockchip_pcie_get_phys(rockchip);
+	if (err)
+		return err;
+
+	err = of_property_read_u32(dev->of_node,
+				   "rockchip,max-outbound-regions",
+				   &ep->max_regions);
+	if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
+		ep->max_regions = MAX_REGION_LIMIT;
+
+	err = of_property_read_u8(dev->of_node, "max-functions",
+				  &ep->epc->max_functions);
+	if (err < 0)
+		ep->epc->max_functions = 1;
+
+	return 0;
+}
+
+static const struct of_device_id rockchip_pcie_ep_of_match[] = {
+	{ .compatible = "rockchip,rk3399-pcie-ep"},
+	{},
+};
+
+static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rockchip_pcie_ep *ep;
+	struct rockchip_pcie *rockchip;
+	struct pci_epc *epc;
+	size_t max_regions;
+	int err;
+
+	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+	if (!ep)
+		return -ENOMEM;
+
+	rockchip = &ep->rockchip;
+	rockchip->is_rc = false;
+	rockchip->dev = dev;
+
+	epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
+	if (IS_ERR(epc)) {
+		dev_err(dev, "failed to create epc device\n");
+		return PTR_ERR(epc);
+	}
+
+	ep->epc = epc;
+	epc_set_drvdata(epc, ep);
+
+	err = rockchip_pcie_parse_ep_dt(rockchip, ep);
+	if (err)
+		return err;
+
+	err = rockchip_pcie_enable_clocks(rockchip);
+	if (err)
+		return err;
+
+	err = rockchip_pcie_init_port(rockchip);
+	if (err)
+		goto err_disable_clocks;
+
+	/* Establish the link automatically */
+	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+			    PCIE_CLIENT_CONFIG);
+
+	max_regions = ep->max_regions;
+	ep->ob_addr = devm_kzalloc(dev, max_regions * sizeof(*ep->ob_addr),
+				   GFP_KERNEL);
+
+	if (!ep->ob_addr) {
+		err = -ENOMEM;
+		goto err_uninit_port;
+	}
+
+	/* Only enable function 0 by default */
+	rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
+
+	err = pci_epc_mem_init(epc, rockchip->mem_res->start,
+			       resource_size(rockchip->mem_res));
+	if (err < 0) {
+		dev_err(dev, "failed to initialize the memory space\n");
+		goto err_uninit_port;
+	}
+
+	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
+						  SZ_128K);
+	if (!ep->irq_cpu_addr) {
+		dev_err(dev, "failed to reserve memory space for MSI\n");
+		err = -ENOMEM;
+		goto err_epc_mem_exit;
+	}
+
+	ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+
+	return 0;
+err_epc_mem_exit:
+	pci_epc_mem_exit(epc);
+err_uninit_port:
+	rockchip_pcie_deinit_phys(rockchip);
+err_disable_clocks:
+	rockchip_pcie_disable_clocks(rockchip);
+	return err;
+}
+
+static struct platform_driver rockchip_pcie_ep_driver = {
+	.driver = {
+		.name = "rockchip-pcie-ep",
+		.of_match_table = rockchip_pcie_ep_of_match,
+	},
+	.probe = rockchip_pcie_ep_probe,
+};
+
+builtin_platform_driver(rockchip_pcie_ep_driver);
diff --git a/drivers/pci/host/pcie-rockchip-host.c b/drivers/pci/host/pcie-rockchip-host.c
new file mode 100644
index 000000000000..1372d270764f
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip-host.c
@@ -0,0 +1,1142 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe host controller driver
+ *
+ * Copyright (c) 2016 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *         Wenrui Li <wenrui.li@rock-chips.com>
+ *
+ * Bits taken from Synopsys DesignWare Host controller driver and
+ * ARM PCI Host generic driver.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+#include "../pci.h"
+#include "pcie-rockchip.h"
+
+static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
+{
+	u32 status;
+
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+	status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
+{
+	u32 status;
+
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+	status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
+{
+	u32 val;
+
+	/* Update Tx credit maximum update interval */
+	val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
+	val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
+	val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000);	/* ns */
+	rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
+}
+
+static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
+				      struct pci_bus *bus, int dev)
+{
+	/* access only one slot on each root port */
+	if (bus->number == rockchip->root_bus_nr && dev > 0)
+		return 0;
+
+	/*
+	 * do not read more than one device on the bus directly attached
+	 * to RC's downstream side.
+	 */
+	if (bus->primary == rockchip->root_bus_nr && dev > 0)
+		return 0;
+
+	return 1;
+}
+
+static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
+{
+	u32 val;
+	u8 map;
+
+	if (rockchip->legacy_phy)
+		return GENMASK(MAX_LANE_NUM - 1, 0);
+
+	val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
+	map = val & PCIE_CORE_LANE_MAP_MASK;
+
+	/* The link may be using a reverse-indexed mapping. */
+	if (val & PCIE_CORE_LANE_MAP_REVERSE)
+		map = bitrev8(map) >> 4;
+
+	return map;
+}
+
+static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
+				     int where, int size, u32 *val)
+{
+	void __iomem *addr;
+
+	addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
+
+	if (!IS_ALIGNED((uintptr_t)addr, size)) {
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	if (size == 4) {
+		*val = readl(addr);
+	} else if (size == 2) {
+		*val = readw(addr);
+	} else if (size == 1) {
+		*val = readb(addr);
+	} else {
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
+				     int where, int size, u32 val)
+{
+	u32 mask, tmp, offset;
+	void __iomem *addr;
+
+	offset = where & ~0x3;
+	addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
+
+	if (size == 4) {
+		writel(val, addr);
+		return PCIBIOS_SUCCESSFUL;
+	}
+
+	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+
+	/*
+	 * N.B. This read/modify/write isn't safe in general because it can
+	 * corrupt RW1C bits in adjacent registers.  But the hardware
+	 * doesn't support smaller writes.
+	 */
+	tmp = readl(addr) & mask;
+	tmp |= val << ((where & 0x3) * 8);
+	writel(tmp, addr);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
+				       struct pci_bus *bus, u32 devfn,
+				       int where, int size, u32 *val)
+{
+	u32 busdev;
+
+	busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+				PCI_FUNC(devfn), where);
+
+	if (!IS_ALIGNED(busdev, size)) {
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
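+	/*
+	 * Use Type 0 config accesses for the bus directly below the Root
+	 * Port, and Type 1 for buses further downstream.
+	 */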
+	if (bus->parent->number == rockchip->root_bus_nr)
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE0_CFG);
+	else
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE1_CFG);
+
+	if (size == 4) {
+		*val = readl(rockchip->reg_base + busdev);
+	} else if (size == 2) {
+		*val = readw(rockchip->reg_base + busdev);
+	} else if (size == 1) {
+		*val = readb(rockchip->reg_base + busdev);
+	} else {
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
+				       struct pci_bus *bus, u32 devfn,
+				       int where, int size, u32 val)
+{
+	u32 busdev;
+
+	busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+				PCI_FUNC(devfn), where);
+	if (!IS_ALIGNED(busdev, size))
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
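+	/* Same Type 0 vs Type 1 config selection as in the read path. */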
+	if (bus->parent->number == rockchip->root_bus_nr)
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE0_CFG);
+	else
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE1_CFG);
+
+	if (size == 4)
+		writel(val, rockchip->reg_base + busdev);
+	else if (size == 2)
+		writew(val, rockchip->reg_base + busdev);
+	else if (size == 1)
+		writeb(val, rockchip->reg_base + busdev);
+	else
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+				 int size, u32 *val)
+{
+	struct rockchip_pcie *rockchip = bus->sysdata;
+
+	if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
+		*val = 0xffffffff;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	if (bus->number == rockchip->root_bus_nr)
+		return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
+
+	return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size,
+					   val);
+}
+
+static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+				 int where, int size, u32 val)
+{
+	struct rockchip_pcie *rockchip = bus->sysdata;
+
+	if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (bus->number == rockchip->root_bus_nr)
+		return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
+
+	return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size,
+					   val);
+}
+
+static struct pci_ops rockchip_pcie_ops = {
+	.read = rockchip_pcie_rd_conf,
+	.write = rockchip_pcie_wr_conf,
+};
+
+static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
+{
+	int curr;
+	u32 status, scale, power;
+
+	if (IS_ERR(rockchip->vpcie3v3))
+		return;
+
+	/*
+	 * Set the RC's Captured Slot Power Limit and Scale if vpcie3v3 is
+	 * available.  Both default to zero, which means software must program
+	 * them to match the actual power supply.
+	 */
+	curr = regulator_get_current_limit(rockchip->vpcie3v3);
+	if (curr <= 0)
+		return;
+
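+	/*
+	 * The Slot Power Limit Scale encodes a multiplier (0 = 1.0x, 1 = 0.1x,
+	 * 2 = 0.01x, 3 = 0.001x).  Start at the finest scale and coarsen it
+	 * until the limit value fits in the 8-bit CSPL field.
+	 */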
+	scale = 3; /* 0.001x */
+	curr = curr / 1000; /* convert to mA */
+	power = (curr * 3300) / 1000; /* milliwatt */
+	while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+		if (!scale) {
+			dev_warn(rockchip->dev, "invalid power supply\n");
+			return;
+		}
+		scale--;
+		power = power / 10;
+	}
+
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
+	status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
+		  (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+}
+
+/**
+ * rockchip_pcie_host_init_port - Initialize hardware
+ * @rockchip: PCIe port information
+ */
+static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	int err, i = MAX_LANE_NUM;
+	u32 status;
+
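+	/* Drive the endpoint GPIO low to hold the device in reset during setup. */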
+	gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+
+	err = rockchip_pcie_init_port(rockchip);
+	if (err)
+		return err;
+
+	/* Fix the transmitted FTS count desired to exit from L0s. */
+	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
+	status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
+		 (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
+	rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
+
+	rockchip_pcie_set_power_limit(rockchip);
+
+	/* Set RC's clock architecture as common clock */
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+	status |= PCI_EXP_LNKSTA_SLC << 16;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+	/* Set RC's RCB to 128 */
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+	status |= PCI_EXP_LNKCTL_RCB;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+	/* Enable Gen1 training */
+	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+			    PCIE_CLIENT_CONFIG);
+
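+	/* Release the endpoint GPIO so the device can exit reset and train. */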
+	gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
+
+	/* 500ms timeout value should be enough for Gen1/2 training */
+	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
+				 status, PCIE_LINK_UP(status), 20,
+				 500 * USEC_PER_MSEC);
+	if (err) {
+		dev_err(dev, "PCIe link training gen1 timeout!\n");
+		goto err_power_off_phy;
+	}
+
+	if (rockchip->link_gen == 2) {
+		/*
+		 * Enable link retraining for Gen2.  This must only be done
+		 * after Gen1 training has completed.
+		 */
+		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+		status |= PCI_EXP_LNKCTL_RL;
+		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+		err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+					 status, PCIE_LINK_IS_GEN2(status), 20,
+					 500 * USEC_PER_MSEC);
+		if (err)
+			dev_dbg(dev, "PCIe link training gen2 timeout, falling back to gen1!\n");
+	}
+
+	/* Check the final link width from the negotiated lane counter in MGMT */
+	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+	status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
+			  PCIE_CORE_PL_CONF_LANE_SHIFT);
+	dev_dbg(dev, "current link width is x%d\n", status);
+
+	/* Power off unused lane(s) */
+	rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
+	for (i = 0; i < MAX_LANE_NUM; i++) {
+		if (!(rockchip->lanes_map & BIT(i))) {
+			dev_dbg(dev, "idling lane %d\n", i);
+			phy_power_off(rockchip->phys[i]);
+		}
+	}
+
+	rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
+			    PCIE_CORE_CONFIG_VENDOR);
+	rockchip_pcie_write(rockchip,
+			    PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
+			    PCIE_RC_CONFIG_RID_CCR);
+
+	/* Clear THP cap's next cap pointer to remove L1 substate cap */
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
+	status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
+
+	/* Clear L0s from RC's link cap */
+	if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
+		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
+		status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
+		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
+	}
+
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
+	status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
+	status |= PCIE_RC_CONFIG_DCSR_MPS_256;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
+
+	return 0;
+err_power_off_phy:
+	while (i--)
+		phy_power_off(rockchip->phys[i]);
+	i = MAX_LANE_NUM;
+	while (i--)
+		phy_exit(rockchip->phys[i]);
+	return err;
+}
+
+static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
+{
+	struct rockchip_pcie *rockchip = arg;
+	struct device *dev = rockchip->dev;
+	u32 reg;
+	u32 sub_reg;
+
+	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+	if (reg & PCIE_CLIENT_INT_LOCAL) {
+		dev_dbg(dev, "local interrupt received\n");
+		sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
+		if (sub_reg & PCIE_CORE_INT_PRFPE)
+			dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
+
+		if (sub_reg & PCIE_CORE_INT_CRFPE)
+			dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
+
+		if (sub_reg & PCIE_CORE_INT_RRPE)
+			dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
+
+		if (sub_reg & PCIE_CORE_INT_PRFO)
+			dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
+
+		if (sub_reg & PCIE_CORE_INT_CRFO)
+			dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
+
+		if (sub_reg & PCIE_CORE_INT_RT)
+			dev_dbg(dev, "replay timer timed out\n");
+
+		if (sub_reg & PCIE_CORE_INT_RTR)
+			dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
+
+		if (sub_reg & PCIE_CORE_INT_PE)
+			dev_dbg(dev, "phy error detected on receive side\n");
+
+		if (sub_reg & PCIE_CORE_INT_MTR)
+			dev_dbg(dev, "malformed TLP received from the link\n");
+
+		if (sub_reg & PCIE_CORE_INT_UCR)
+			dev_dbg(dev, "unexpected completion received from the link\n");
+
+		if (sub_reg & PCIE_CORE_INT_FCE)
+			dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
+
+		if (sub_reg & PCIE_CORE_INT_CT)
+			dev_dbg(dev, "a request timed out waiting for completion\n");
+
+		if (sub_reg & PCIE_CORE_INT_UTC)
+			dev_dbg(dev, "unmapped TC error\n");
+
+		if (sub_reg & PCIE_CORE_INT_MMVC)
+			dev_dbg(dev, "MSI mask register changes\n");
+
+		rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
+	} else if (reg & PCIE_CLIENT_INT_PHY) {
+		dev_dbg(dev, "phy link changes\n");
+		rockchip_pcie_update_txcredit_mui(rockchip);
+		rockchip_pcie_clr_bw_int(rockchip);
+	}
+
+	rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
+			    PCIE_CLIENT_INT_STATUS);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
+{
+	struct rockchip_pcie *rockchip = arg;
+	struct device *dev = rockchip->dev;
+	u32 reg;
+
+	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+	if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
+		dev_dbg(dev, "legacy done interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_MSG)
+		dev_dbg(dev, "message done interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_HOT_RST)
+		dev_dbg(dev, "hot reset interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_DPA)
+		dev_dbg(dev, "dpa interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_FATAL_ERR)
+		dev_dbg(dev, "fatal error interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
+		dev_dbg(dev, "non-fatal error interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_CORR_ERR)
+		dev_dbg(dev, "correctable error interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_PHY)
+		dev_dbg(dev, "phy interrupt received\n");
+
+	rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
+			      PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
+			      PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
+			      PCIE_CLIENT_INT_NFATAL_ERR |
+			      PCIE_CLIENT_INT_CORR_ERR |
+			      PCIE_CLIENT_INT_PHY),
+		   PCIE_CLIENT_INT_STATUS);
+
+	return IRQ_HANDLED;
+}
+
+static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+	struct device *dev = rockchip->dev;
+	u32 reg;
+	u32 hwirq;
+	u32 virq;
+
+	chained_irq_enter(chip, desc);
+
+	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+	reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
+
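+	/* Dispatch each pending INTx to its mapped virtual IRQ. */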
+	while (reg) {
+		hwirq = ffs(reg) - 1;
+		reg &= ~BIT(hwirq);
+
+		virq = irq_find_mapping(rockchip->irq_domain, hwirq);
+		if (virq)
+			generic_handle_irq(virq);
+		else
+			dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
+{
+	int irq, err;
+	struct device *dev = rockchip->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	irq = platform_get_irq_byname(pdev, "sys");
+	if (irq < 0) {
+		dev_err(dev, "missing sys IRQ resource\n");
+		return irq;
+	}
+
+	err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
+			       IRQF_SHARED, "pcie-sys", rockchip);
+	if (err) {
+		dev_err(dev, "failed to request PCIe subsystem IRQ\n");
+		return err;
+	}
+
+	irq = platform_get_irq_byname(pdev, "legacy");
+	if (irq < 0) {
+		dev_err(dev, "missing legacy IRQ resource\n");
+		return irq;
+	}
+
+	irq_set_chained_handler_and_data(irq,
+					 rockchip_pcie_legacy_int_handler,
+					 rockchip);
+
+	irq = platform_get_irq_byname(pdev, "client");
+	if (irq < 0) {
+		dev_err(dev, "missing client IRQ resource\n");
+		return irq;
+	}
+
+	err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
+			       IRQF_SHARED, "pcie-client", rockchip);
+	if (err) {
+		dev_err(dev, "failed to request PCIe client IRQ\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * rockchip_pcie_parse_host_dt - Parse Device Tree
+ * @rockchip: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	int err;
+
+	err = rockchip_pcie_parse_dt(rockchip);
+	if (err)
+		return err;
+
+	err = rockchip_pcie_setup_irq(rockchip);
+	if (err)
+		return err;
+
+	rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
+	if (IS_ERR(rockchip->vpcie12v)) {
+		if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_info(dev, "no vpcie12v regulator found\n");
+	}
+
+	rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+	if (IS_ERR(rockchip->vpcie3v3)) {
+		if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_info(dev, "no vpcie3v3 regulator found\n");
+	}
+
+	rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
+	if (IS_ERR(rockchip->vpcie1v8)) {
+		if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_info(dev, "no vpcie1v8 regulator found\n");
+	}
+
+	rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
+	if (IS_ERR(rockchip->vpcie0v9)) {
+		if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_info(dev, "no vpcie0v9 regulator found\n");
+	}
+
+	return 0;
+}
+
+static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	int err;
+
+	if (!IS_ERR(rockchip->vpcie12v)) {
+		err = regulator_enable(rockchip->vpcie12v);
+		if (err) {
+			dev_err(dev, "failed to enable vpcie12v regulator\n");
+			goto err_out;
+		}
+	}
+
+	if (!IS_ERR(rockchip->vpcie3v3)) {
+		err = regulator_enable(rockchip->vpcie3v3);
+		if (err) {
+			dev_err(dev, "failed to enable vpcie3v3 regulator\n");
+			goto err_disable_12v;
+		}
+	}
+
+	if (!IS_ERR(rockchip->vpcie1v8)) {
+		err = regulator_enable(rockchip->vpcie1v8);
+		if (err) {
+			dev_err(dev, "failed to enable vpcie1v8 regulator\n");
+			goto err_disable_3v3;
+		}
+	}
+
+	if (!IS_ERR(rockchip->vpcie0v9)) {
+		err = regulator_enable(rockchip->vpcie0v9);
+		if (err) {
+			dev_err(dev, "failed to enable vpcie0v9 regulator\n");
+			goto err_disable_1v8;
+		}
+	}
+
+	return 0;
+
+err_disable_1v8:
+	if (!IS_ERR(rockchip->vpcie1v8))
+		regulator_disable(rockchip->vpcie1v8);
+err_disable_3v3:
+	if (!IS_ERR(rockchip->vpcie3v3))
+		regulator_disable(rockchip->vpcie3v3);
+err_disable_12v:
+	if (!IS_ERR(rockchip->vpcie12v))
+		regulator_disable(rockchip->vpcie12v);
+err_out:
+	return err;
+}
+
+static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
+{
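+	/* Unmask the client and core interrupts handled by this driver. */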
+	rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
+			    (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
+	rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
+			    PCIE_CORE_INT_MASK);
+
+	rockchip_pcie_enable_bw_int(rockchip);
+}
+
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+				  irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+	.map = rockchip_pcie_intx_map,
+};
+
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	struct device_node *intc = of_get_next_child(dev->of_node, NULL);
+
+	if (!intc) {
+		dev_err(dev, "missing child interrupt-controller node\n");
+		return -EINVAL;
+	}
+
+	rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+						    &intx_domain_ops, rockchip);
+	if (!rockchip->irq_domain) {
+		dev_err(dev, "failed to get an INTx IRQ domain\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
+				     int region_no, int type, u8 num_pass_bits,
+				     u32 lower_addr, u32 upper_addr)
+{
+	u32 ob_addr_0;
+	u32 ob_addr_1;
+	u32 ob_desc_0;
+	u32 aw_offset;
+
+	if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
+		return -EINVAL;
+	if (num_pass_bits + 1 < 8)
+		return -EINVAL;
+	if (num_pass_bits > 63)
+		return -EINVAL;
+	if (region_no == 0) {
+		if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
+			return -EINVAL;
+	}
+	if (region_no != 0) {
+		if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
+			return -EINVAL;
+	}
+
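+	/*
+	 * The low (num_pass_bits + 1) address bits pass through untranslated,
+	 * so each region covers 2^(num_pass_bits + 1) bytes.
+	 */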
+	aw_offset = (region_no << OB_REG_SIZE_SHIFT);
+
+	ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
+	ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
+	ob_addr_1 = upper_addr;
+	ob_desc_0 = (1 << 23 | type);
+
+	rockchip_pcie_write(rockchip, ob_addr_0,
+			    PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
+	rockchip_pcie_write(rockchip, ob_addr_1,
+			    PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
+	rockchip_pcie_write(rockchip, ob_desc_0,
+			    PCIE_CORE_OB_REGION_DESC0 + aw_offset);
+	rockchip_pcie_write(rockchip, 0,
+			    PCIE_CORE_OB_REGION_DESC1 + aw_offset);
+
+	return 0;
+}
+
+static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
+				     int region_no, u8 num_pass_bits,
+				     u32 lower_addr, u32 upper_addr)
+{
+	u32 ib_addr_0;
+	u32 ib_addr_1;
+	u32 aw_offset;
+
+	if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
+		return -EINVAL;
+	if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
+		return -EINVAL;
+	if (num_pass_bits > 63)
+		return -EINVAL;
+
+	aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
+
+	ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
+	ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
+	ib_addr_1 = upper_addr;
+
+	rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
+	rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
+
+	return 0;
+}
+
+static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	int offset;
+	int err;
+	int reg_no;
+
+	rockchip_pcie_cfg_configuration_accesses(rockchip,
+						 AXI_WRAPPER_TYPE0_CFG);
+
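+	/*
+	 * Each outbound region below covers 1 MiB ("20 - 1" pass bits), so
+	 * the MEM and IO windows are programmed one MiB at a time.
+	 */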
+	for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
+		err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
+						AXI_WRAPPER_MEM_WRITE,
+						20 - 1,
+						rockchip->mem_bus_addr +
+						(reg_no << 20),
+						0);
+		if (err) {
+			dev_err(dev, "failed to program RC mem outbound ATU\n");
+			return err;
+		}
+	}
+
+	err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
+	if (err) {
+		dev_err(dev, "failed to program RC mem inbound ATU\n");
+		return err;
+	}
+
+	offset = rockchip->mem_size >> 20;
+	for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
+		err = rockchip_pcie_prog_ob_atu(rockchip,
+						reg_no + 1 + offset,
+						AXI_WRAPPER_IO_WRITE,
+						20 - 1,
+						rockchip->io_bus_addr +
+						(reg_no << 20),
+						0);
+		if (err) {
+			dev_err(dev, "failed to program RC io outbound ATU\n");
+			return err;
+		}
+	}
+
+	/* assign message regions */
+	rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
+				  AXI_WRAPPER_NOR_MSG,
+				  20 - 1, 0, 0);
+
+	rockchip->msg_bus_addr = rockchip->mem_bus_addr +
+					((reg_no + offset) << 20);
+	return err;
+}
+
+static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
+{
+	u32 value;
+	int err;
+
+	/* send PME_TURN_OFF message */
+	writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
+
+	/* Read the LTSSM and wait for the link to fall into the L2 state */
+	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
+				 value, PCIE_LINK_IS_L2(value), 20,
+				 jiffies_to_usecs(5 * HZ));
+	if (err) {
+		dev_err(rockchip->dev, "timed out waiting for PCIe link to enter L2!\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
+{
+	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+	int ret;
+
+	/* Disable core and client interrupts since we don't need to ack PME_ACK */
+	rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
+			    PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
+	rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);
+
+	ret = rockchip_pcie_wait_l2(rockchip);
+	if (ret) {
+		rockchip_pcie_enable_interrupts(rockchip);
+		return ret;
+	}
+
+	rockchip_pcie_deinit_phys(rockchip);
+
+	rockchip_pcie_disable_clocks(rockchip);
+
+	if (!IS_ERR(rockchip->vpcie0v9))
+		regulator_disable(rockchip->vpcie0v9);
+
+	return ret;
+}
+
+static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
+{
+	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+	int err;
+
+	if (!IS_ERR(rockchip->vpcie0v9)) {
+		err = regulator_enable(rockchip->vpcie0v9);
+		if (err) {
+			dev_err(dev, "failed to enable vpcie0v9 regulator\n");
+			return err;
+		}
+	}
+
+	err = rockchip_pcie_enable_clocks(rockchip);
+	if (err)
+		goto err_disable_0v9;
+
+	err = rockchip_pcie_host_init_port(rockchip);
+	if (err)
+		goto err_pcie_resume;
+
+	err = rockchip_pcie_cfg_atu(rockchip);
+	if (err)
+		goto err_deinit_port;
+
+	/* Need this to enter L1 again */
+	rockchip_pcie_update_txcredit_mui(rockchip);
+	rockchip_pcie_enable_interrupts(rockchip);
+
+	return 0;
+
+err_deinit_port:
+	rockchip_pcie_deinit_phys(rockchip);
+err_pcie_resume:
+	rockchip_pcie_disable_clocks(rockchip);
+err_disable_0v9:
+	if (!IS_ERR(rockchip->vpcie0v9))
+		regulator_disable(rockchip->vpcie0v9);
+	return err;
+}
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+	struct rockchip_pcie *rockchip;
+	struct device *dev = &pdev->dev;
+	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
+	struct resource_entry *win;
+	resource_size_t io_base;
+	struct resource	*mem;
+	struct resource	*io;
+	int err;
+
+	LIST_HEAD(res);
+
+	if (!dev->of_node)
+		return -ENODEV;
+
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
+	if (!bridge)
+		return -ENOMEM;
+
+	rockchip = pci_host_bridge_priv(bridge);
+
+	platform_set_drvdata(pdev, rockchip);
+	rockchip->dev = dev;
+	rockchip->is_rc = true;
+
+	err = rockchip_pcie_parse_host_dt(rockchip);
+	if (err)
+		return err;
+
+	err = rockchip_pcie_enable_clocks(rockchip);
+	if (err)
+		return err;
+
+	err = rockchip_pcie_set_vpcie(rockchip);
+	if (err) {
+		dev_err(dev, "failed to set vpcie regulator\n");
+		goto err_set_vpcie;
+	}
+
+	err = rockchip_pcie_host_init_port(rockchip);
+	if (err)
+		goto err_vpcie;
+
+	rockchip_pcie_enable_interrupts(rockchip);
+
+	err = rockchip_pcie_init_irq_domain(rockchip);
+	if (err < 0)
+		goto err_deinit_port;
+
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+						    &res, &io_base);
+	if (err)
+		goto err_remove_irq_domain;
+
+	err = devm_request_pci_bus_resources(dev, &res);
+	if (err)
+		goto err_free_res;
+
+	/* Get the I/O and memory ranges from DT */
+	resource_list_for_each_entry(win, &res) {
+		switch (resource_type(win->res)) {
+		case IORESOURCE_IO:
+			io = win->res;
+			io->name = "I/O";
+			rockchip->io_size = resource_size(io);
+			rockchip->io_bus_addr = io->start - win->offset;
+			err = pci_remap_iospace(io, io_base);
+			if (err) {
+				dev_warn(dev, "error %d: failed to map resource %pR\n",
+					 err, io);
+				continue;
+			}
+			rockchip->io = io;
+			break;
+		case IORESOURCE_MEM:
+			mem = win->res;
+			mem->name = "MEM";
+			rockchip->mem_size = resource_size(mem);
+			rockchip->mem_bus_addr = mem->start - win->offset;
+			break;
+		case IORESOURCE_BUS:
+			rockchip->root_bus_nr = win->res->start;
+			break;
+		default:
+			continue;
+		}
+	}
+
+	err = rockchip_pcie_cfg_atu(rockchip);
+	if (err)
+		goto err_unmap_iospace;
+
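+	/* Map the message region used to send PME_Turn_Off at suspend time. */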
+	rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
+	if (!rockchip->msg_region) {
+		err = -ENOMEM;
+		goto err_unmap_iospace;
+	}
+
+	list_splice_init(&res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = rockchip;
+	bridge->busnr = 0;
+	bridge->ops = &rockchip_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	err = pci_scan_root_bus_bridge(bridge);
+	if (err < 0)
+		goto err_unmap_iospace;
+
+	bus = bridge->bus;
+
+	rockchip->root_bus = bus;
+
+	pci_bus_size_bridges(bus);
+	pci_bus_assign_resources(bus);
+	list_for_each_entry(child, &bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(bus);
+	return 0;
+
+err_unmap_iospace:
+	pci_unmap_iospace(rockchip->io);
+err_free_res:
+	pci_free_resource_list(&res);
+err_remove_irq_domain:
+	irq_domain_remove(rockchip->irq_domain);
+err_deinit_port:
+	rockchip_pcie_deinit_phys(rockchip);
+err_vpcie:
+	if (!IS_ERR(rockchip->vpcie12v))
+		regulator_disable(rockchip->vpcie12v);
+	if (!IS_ERR(rockchip->vpcie3v3))
+		regulator_disable(rockchip->vpcie3v3);
+	if (!IS_ERR(rockchip->vpcie1v8))
+		regulator_disable(rockchip->vpcie1v8);
+	if (!IS_ERR(rockchip->vpcie0v9))
+		regulator_disable(rockchip->vpcie0v9);
+err_set_vpcie:
+	rockchip_pcie_disable_clocks(rockchip);
+	return err;
+}
+
+static int rockchip_pcie_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+
+	pci_stop_root_bus(rockchip->root_bus);
+	pci_remove_root_bus(rockchip->root_bus);
+	pci_unmap_iospace(rockchip->io);
+	irq_domain_remove(rockchip->irq_domain);
+
+	rockchip_pcie_deinit_phys(rockchip);
+
+	rockchip_pcie_disable_clocks(rockchip);
+
+	if (!IS_ERR(rockchip->vpcie12v))
+		regulator_disable(rockchip->vpcie12v);
+	if (!IS_ERR(rockchip->vpcie3v3))
+		regulator_disable(rockchip->vpcie3v3);
+	if (!IS_ERR(rockchip->vpcie1v8))
+		regulator_disable(rockchip->vpcie1v8);
+	if (!IS_ERR(rockchip->vpcie0v9))
+		regulator_disable(rockchip->vpcie0v9);
+
+	return 0;
+}
+
+static const struct dev_pm_ops rockchip_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
+				      rockchip_pcie_resume_noirq)
+};
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+	{ .compatible = "rockchip,rk3399-pcie", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
+
+static struct platform_driver rockchip_pcie_driver = {
+	.driver = {
+		.name = "rockchip-pcie",
+		.of_match_table = rockchip_pcie_of_match,
+		.pm = &rockchip_pcie_pm_ops,
+	},
+	.probe = rockchip_pcie_probe,
+	.remove = rockchip_pcie_remove,
+};
+module_platform_driver(rockchip_pcie_driver);
+
+MODULE_AUTHOR("Rockchip Inc");
+MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index f1e8f97ea1fb..c53d1322a3d6 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -11,535 +11,154 @@
  * ARM PCI Host generic driver.
  */
 
-#include <linux/bitrev.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/iopoll.h>
-#include <linux/irq.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
 #include <linux/of_pci.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
-#include <linux/regmap.h>
 
-/*
- * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
- * bits.  This allows atomic updates of the register without locking.
- */
-#define HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
-#define HIWORD_UPDATE_BIT(val)		HIWORD_UPDATE(val, val)
-
-#define ENCODE_LANES(x)			((((x) >> 1) & 3) << 4)
-#define MAX_LANE_NUM			4
-
-#define PCIE_CLIENT_BASE		0x0
-#define PCIE_CLIENT_CONFIG		(PCIE_CLIENT_BASE + 0x00)
-#define   PCIE_CLIENT_CONF_ENABLE	  HIWORD_UPDATE_BIT(0x0001)
-#define   PCIE_CLIENT_LINK_TRAIN_ENABLE	  HIWORD_UPDATE_BIT(0x0002)
-#define   PCIE_CLIENT_ARI_ENABLE	  HIWORD_UPDATE_BIT(0x0008)
-#define   PCIE_CLIENT_CONF_LANE_NUM(x)	  HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
-#define   PCIE_CLIENT_MODE_RC		  HIWORD_UPDATE_BIT(0x0040)
-#define   PCIE_CLIENT_GEN_SEL_1		  HIWORD_UPDATE(0x0080, 0)
-#define   PCIE_CLIENT_GEN_SEL_2		  HIWORD_UPDATE_BIT(0x0080)
-#define PCIE_CLIENT_DEBUG_OUT_0		(PCIE_CLIENT_BASE + 0x3c)
-#define   PCIE_CLIENT_DEBUG_LTSSM_MASK		GENMASK(5, 0)
-#define   PCIE_CLIENT_DEBUG_LTSSM_L1		0x18
-#define   PCIE_CLIENT_DEBUG_LTSSM_L2		0x19
-#define PCIE_CLIENT_BASIC_STATUS1	(PCIE_CLIENT_BASE + 0x48)
-#define   PCIE_CLIENT_LINK_STATUS_UP		0x00300000
-#define   PCIE_CLIENT_LINK_STATUS_MASK		0x00300000
-#define PCIE_CLIENT_INT_MASK		(PCIE_CLIENT_BASE + 0x4c)
-#define PCIE_CLIENT_INT_STATUS		(PCIE_CLIENT_BASE + 0x50)
-#define   PCIE_CLIENT_INTR_MASK			GENMASK(8, 5)
-#define   PCIE_CLIENT_INTR_SHIFT		5
-#define   PCIE_CLIENT_INT_LEGACY_DONE		BIT(15)
-#define   PCIE_CLIENT_INT_MSG			BIT(14)
-#define   PCIE_CLIENT_INT_HOT_RST		BIT(13)
-#define   PCIE_CLIENT_INT_DPA			BIT(12)
-#define   PCIE_CLIENT_INT_FATAL_ERR		BIT(11)
-#define   PCIE_CLIENT_INT_NFATAL_ERR		BIT(10)
-#define   PCIE_CLIENT_INT_CORR_ERR		BIT(9)
-#define   PCIE_CLIENT_INT_INTD			BIT(8)
-#define   PCIE_CLIENT_INT_INTC			BIT(7)
-#define   PCIE_CLIENT_INT_INTB			BIT(6)
-#define   PCIE_CLIENT_INT_INTA			BIT(5)
-#define   PCIE_CLIENT_INT_LOCAL			BIT(4)
-#define   PCIE_CLIENT_INT_UDMA			BIT(3)
-#define   PCIE_CLIENT_INT_PHY			BIT(2)
-#define   PCIE_CLIENT_INT_HOT_PLUG		BIT(1)
-#define   PCIE_CLIENT_INT_PWR_STCG		BIT(0)
-
-#define PCIE_CLIENT_INT_LEGACY \
-	(PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
-	PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
-
-#define PCIE_CLIENT_INT_CLI \
-	(PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
-	PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
-	PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
-	PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
-	PCIE_CLIENT_INT_PHY)
-
-#define PCIE_CORE_CTRL_MGMT_BASE	0x900000
-#define PCIE_CORE_CTRL			(PCIE_CORE_CTRL_MGMT_BASE + 0x000)
-#define   PCIE_CORE_PL_CONF_SPEED_5G		0x00000008
-#define   PCIE_CORE_PL_CONF_SPEED_MASK		0x00000018
-#define   PCIE_CORE_PL_CONF_LANE_MASK		0x00000006
-#define   PCIE_CORE_PL_CONF_LANE_SHIFT		1
-#define PCIE_CORE_CTRL_PLC1		(PCIE_CORE_CTRL_MGMT_BASE + 0x004)
-#define   PCIE_CORE_CTRL_PLC1_FTS_MASK		GENMASK(23, 8)
-#define   PCIE_CORE_CTRL_PLC1_FTS_SHIFT		8
-#define   PCIE_CORE_CTRL_PLC1_FTS_CNT		0xffff
-#define PCIE_CORE_TXCREDIT_CFG1		(PCIE_CORE_CTRL_MGMT_BASE + 0x020)
-#define   PCIE_CORE_TXCREDIT_CFG1_MUI_MASK	0xFFFF0000
-#define   PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT	16
-#define   PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
-		(((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
-#define PCIE_CORE_LANE_MAP             (PCIE_CORE_CTRL_MGMT_BASE + 0x200)
-#define   PCIE_CORE_LANE_MAP_MASK              0x0000000f
-#define   PCIE_CORE_LANE_MAP_REVERSE           BIT(16)
-#define PCIE_CORE_INT_STATUS		(PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
-#define   PCIE_CORE_INT_PRFPE			BIT(0)
-#define   PCIE_CORE_INT_CRFPE			BIT(1)
-#define   PCIE_CORE_INT_RRPE			BIT(2)
-#define   PCIE_CORE_INT_PRFO			BIT(3)
-#define   PCIE_CORE_INT_CRFO			BIT(4)
-#define   PCIE_CORE_INT_RT			BIT(5)
-#define   PCIE_CORE_INT_RTR			BIT(6)
-#define   PCIE_CORE_INT_PE			BIT(7)
-#define   PCIE_CORE_INT_MTR			BIT(8)
-#define   PCIE_CORE_INT_UCR			BIT(9)
-#define   PCIE_CORE_INT_FCE			BIT(10)
-#define   PCIE_CORE_INT_CT			BIT(11)
-#define   PCIE_CORE_INT_UTC			BIT(18)
-#define   PCIE_CORE_INT_MMVC			BIT(19)
-#define PCIE_CORE_CONFIG_VENDOR		(PCIE_CORE_CTRL_MGMT_BASE + 0x44)
-#define PCIE_CORE_INT_MASK		(PCIE_CORE_CTRL_MGMT_BASE + 0x210)
-#define PCIE_RC_BAR_CONF		(PCIE_CORE_CTRL_MGMT_BASE + 0x300)
-
-#define PCIE_CORE_INT \
-		(PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
-		 PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
-		 PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
-		 PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
-		 PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
-		 PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
-		 PCIE_CORE_INT_MMVC)
-
-#define PCIE_RC_CONFIG_NORMAL_BASE	0x800000
-#define PCIE_RC_CONFIG_BASE		0xa00000
-#define PCIE_RC_CONFIG_RID_CCR		(PCIE_RC_CONFIG_BASE + 0x08)
-#define   PCIE_RC_CONFIG_SCC_SHIFT		16
-#define PCIE_RC_CONFIG_DCR		(PCIE_RC_CONFIG_BASE + 0xc4)
-#define   PCIE_RC_CONFIG_DCR_CSPL_SHIFT		18
-#define   PCIE_RC_CONFIG_DCR_CSPL_LIMIT		0xff
-#define   PCIE_RC_CONFIG_DCR_CPLS_SHIFT		26
-#define PCIE_RC_CONFIG_DCSR		(PCIE_RC_CONFIG_BASE + 0xc8)
-#define   PCIE_RC_CONFIG_DCSR_MPS_MASK		GENMASK(7, 5)
-#define   PCIE_RC_CONFIG_DCSR_MPS_256		(0x1 << 5)
-#define PCIE_RC_CONFIG_LINK_CAP		(PCIE_RC_CONFIG_BASE + 0xcc)
-#define   PCIE_RC_CONFIG_LINK_CAP_L0S		BIT(10)
-#define PCIE_RC_CONFIG_LCS		(PCIE_RC_CONFIG_BASE + 0xd0)
-#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
-#define PCIE_RC_CONFIG_THP_CAP		(PCIE_RC_CONFIG_BASE + 0x274)
-#define   PCIE_RC_CONFIG_THP_CAP_NEXT_MASK	GENMASK(31, 20)
-
-#define PCIE_CORE_AXI_CONF_BASE		0xc00000
-#define PCIE_CORE_OB_REGION_ADDR0	(PCIE_CORE_AXI_CONF_BASE + 0x0)
-#define   PCIE_CORE_OB_REGION_ADDR0_NUM_BITS	0x3f
-#define   PCIE_CORE_OB_REGION_ADDR0_LO_ADDR	0xffffff00
-#define PCIE_CORE_OB_REGION_ADDR1	(PCIE_CORE_AXI_CONF_BASE + 0x4)
-#define PCIE_CORE_OB_REGION_DESC0	(PCIE_CORE_AXI_CONF_BASE + 0x8)
-#define PCIE_CORE_OB_REGION_DESC1	(PCIE_CORE_AXI_CONF_BASE + 0xc)
-
-#define PCIE_CORE_AXI_INBOUND_BASE	0xc00800
-#define PCIE_RP_IB_ADDR0		(PCIE_CORE_AXI_INBOUND_BASE + 0x0)
-#define   PCIE_CORE_IB_REGION_ADDR0_NUM_BITS	0x3f
-#define   PCIE_CORE_IB_REGION_ADDR0_LO_ADDR	0xffffff00
-#define PCIE_RP_IB_ADDR1		(PCIE_CORE_AXI_INBOUND_BASE + 0x4)
-
-/* Size of one AXI Region (not Region 0) */
-#define AXI_REGION_SIZE				BIT(20)
-/* Size of Region 0, equal to sum of sizes of other regions */
-#define AXI_REGION_0_SIZE			(32 * (0x1 << 20))
-#define OB_REG_SIZE_SHIFT			5
-#define IB_ROOT_PORT_REG_SIZE_SHIFT		3
-#define AXI_WRAPPER_IO_WRITE			0x6
-#define AXI_WRAPPER_MEM_WRITE			0x2
-#define AXI_WRAPPER_TYPE0_CFG			0xa
-#define AXI_WRAPPER_TYPE1_CFG			0xb
-#define AXI_WRAPPER_NOR_MSG			0xc
-
-#define MAX_AXI_IB_ROOTPORT_REGION_NUM		3
-#define MIN_AXI_ADDR_BITS_PASSED		8
-#define PCIE_RC_SEND_PME_OFF			0x11960
-#define ROCKCHIP_VENDOR_ID			0x1d87
-#define PCIE_ECAM_BUS(x)			(((x) & 0xff) << 20)
-#define PCIE_ECAM_DEV(x)			(((x) & 0x1f) << 15)
-#define PCIE_ECAM_FUNC(x)			(((x) & 0x7) << 12)
-#define PCIE_ECAM_REG(x)			(((x) & 0xfff) << 0)
-#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
-	  (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
-	   PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
-#define PCIE_LINK_IS_L2(x) \
-	(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
-#define PCIE_LINK_UP(x) \
-	(((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
-#define PCIE_LINK_IS_GEN2(x) \
-	(((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G)
-
-#define RC_REGION_0_ADDR_TRANS_H		0x00000000
-#define RC_REGION_0_ADDR_TRANS_L		0x00000000
-#define RC_REGION_0_PASS_BITS			(25 - 1)
-#define RC_REGION_0_TYPE_MASK			GENMASK(3, 0)
-#define MAX_AXI_WRAPPER_REGION_NUM		33
-
-struct rockchip_pcie {
-	void	__iomem *reg_base;		/* DT axi-base */
-	void	__iomem *apb_base;		/* DT apb-base */
-	bool    legacy_phy;
-	struct  phy *phys[MAX_LANE_NUM];
-	struct	reset_control *core_rst;
-	struct	reset_control *mgmt_rst;
-	struct	reset_control *mgmt_sticky_rst;
-	struct	reset_control *pipe_rst;
-	struct	reset_control *pm_rst;
-	struct	reset_control *aclk_rst;
-	struct	reset_control *pclk_rst;
-	struct	clk *aclk_pcie;
-	struct	clk *aclk_perf_pcie;
-	struct	clk *hclk_pcie;
-	struct	clk *clk_pcie_pm;
-	struct	regulator *vpcie12v; /* 12V power supply */
-	struct	regulator *vpcie3v3; /* 3.3V power supply */
-	struct	regulator *vpcie1v8; /* 1.8V power supply */
-	struct	regulator *vpcie0v9; /* 0.9V power supply */
-	struct	gpio_desc *ep_gpio;
-	u32	lanes;
-	u8      lanes_map;
-	u8	root_bus_nr;
-	int	link_gen;
-	struct	device *dev;
-	struct	irq_domain *irq_domain;
-	int     offset;
-	struct pci_bus *root_bus;
-	struct resource *io;
-	phys_addr_t io_bus_addr;
-	u32     io_size;
-	void    __iomem *msg_region;
-	u32     mem_size;
-	phys_addr_t msg_bus_addr;
-	phys_addr_t mem_bus_addr;
-};
-
-static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
-{
-	return readl(rockchip->apb_base + reg);
-}
-
-static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
-				u32 reg)
-{
-	writel(val, rockchip->apb_base + reg);
-}
-
-static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
-{
-	u32 status;
-
-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
-	status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-}
-
-static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
-{
-	u32 status;
-
-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
-	status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-}
-
-static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
-{
-	u32 val;
-
-	/* Update Tx credit maximum update interval */
-	val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
-	val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
-	val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000);	/* ns */
-	rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
-}
+#include "../pci.h"
+#include "pcie-rockchip.h"
 
-static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
-				      struct pci_bus *bus, int dev)
+int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
 {
-	/* access only one slot on each root port */
-	if (bus->number == rockchip->root_bus_nr && dev > 0)
-		return 0;
-
-	/*
-	 * do not read more than one device on the bus directly attached
-	 * to RC's downstream side.
-	 */
-	if (bus->primary == rockchip->root_bus_nr && dev > 0)
-		return 0;
-
-	return 1;
-}
-
-static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
-{
-	u32 val;
-	u8 map;
-
-	if (rockchip->legacy_phy)
-		return GENMASK(MAX_LANE_NUM - 1, 0);
-
-	val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
-	map = val & PCIE_CORE_LANE_MAP_MASK;
-
-	/* The link may be using a reverse-indexed mapping. */
-	if (val & PCIE_CORE_LANE_MAP_REVERSE)
-		map = bitrev8(map) >> 4;
-
-	return map;
-}
-
-static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
-				     int where, int size, u32 *val)
-{
-	void __iomem *addr;
-
-	addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
-
-	if (!IS_ALIGNED((uintptr_t)addr, size)) {
-		*val = 0;
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-	}
+	struct device *dev = rockchip->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct device_node *node = dev->of_node;
+	struct resource *regs;
+	int err;
 
-	if (size == 4) {
-		*val = readl(addr);
-	} else if (size == 2) {
-		*val = readw(addr);
-	} else if (size == 1) {
-		*val = readb(addr);
+	if (rockchip->is_rc) {
+		regs = platform_get_resource_byname(pdev,
+						    IORESOURCE_MEM,
+						    "axi-base");
+		rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
+		if (IS_ERR(rockchip->reg_base))
+			return PTR_ERR(rockchip->reg_base);
 	} else {
-		*val = 0;
-		return PCIBIOS_BAD_REGISTER_NUMBER;
+		rockchip->mem_res =
+			platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						     "mem-base");
+		if (!rockchip->mem_res)
+			return -EINVAL;
 	}
-	return PCIBIOS_SUCCESSFUL;
-}
 
-static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
-				     int where, int size, u32 val)
-{
-	u32 mask, tmp, offset;
-	void __iomem *addr;
+	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					    "apb-base");
+	rockchip->apb_base = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(rockchip->apb_base))
+		return PTR_ERR(rockchip->apb_base);
 
-	offset = where & ~0x3;
-	addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
+	err = rockchip_pcie_get_phys(rockchip);
+	if (err)
+		return err;
 
-	if (size == 4) {
-		writel(val, addr);
-		return PCIBIOS_SUCCESSFUL;
+	rockchip->lanes = 1;
+	err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
+	if (!err && (rockchip->lanes == 0 ||
+		     rockchip->lanes == 3 ||
+		     rockchip->lanes > 4)) {
+		dev_warn(dev, "invalid num-lanes, defaulting to one lane\n");
+		rockchip->lanes = 1;
 	}
 
-	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
-
-	/*
-	 * N.B. This read/modify/write isn't safe in general because it can
-	 * corrupt RW1C bits in adjacent registers.  But the hardware
-	 * doesn't support smaller writes.
-	 */
-	tmp = readl(addr) & mask;
-	tmp |= val << ((where & 0x3) * 8);
-	writel(tmp, addr);
-
-	return PCIBIOS_SUCCESSFUL;
-}
-
-static void rockchip_pcie_cfg_configuration_accesses(
-		struct rockchip_pcie *rockchip, u32 type)
-{
-	u32 ob_desc_0;
-
-	/* Configuration Accesses for region 0 */
-	rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
-
-	rockchip_pcie_write(rockchip,
-			    (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
-			    PCIE_CORE_OB_REGION_ADDR0);
-	rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
-			    PCIE_CORE_OB_REGION_ADDR1);
-	ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
-	ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
-	ob_desc_0 |= (type | (0x1 << 23));
-	rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
-	rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
-}
-
-static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
-				       struct pci_bus *bus, u32 devfn,
-				       int where, int size, u32 *val)
-{
-	u32 busdev;
-
-	busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
-				PCI_FUNC(devfn), where);
+	rockchip->link_gen = of_pci_get_max_link_speed(node);
+	if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
+		rockchip->link_gen = 2;
 
-	if (!IS_ALIGNED(busdev, size)) {
-		*val = 0;
-		return PCIBIOS_BAD_REGISTER_NUMBER;
+	rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
+	if (IS_ERR(rockchip->core_rst)) {
+		if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing core reset property in node\n");
+		return PTR_ERR(rockchip->core_rst);
 	}
 
-	if (bus->parent->number == rockchip->root_bus_nr)
-		rockchip_pcie_cfg_configuration_accesses(rockchip,
-						AXI_WRAPPER_TYPE0_CFG);
-	else
-		rockchip_pcie_cfg_configuration_accesses(rockchip,
-						AXI_WRAPPER_TYPE1_CFG);
-
-	if (size == 4) {
-		*val = readl(rockchip->reg_base + busdev);
-	} else if (size == 2) {
-		*val = readw(rockchip->reg_base + busdev);
-	} else if (size == 1) {
-		*val = readb(rockchip->reg_base + busdev);
-	} else {
-		*val = 0;
-		return PCIBIOS_BAD_REGISTER_NUMBER;
+	rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
+	if (IS_ERR(rockchip->mgmt_rst)) {
+		if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing mgmt reset property in node\n");
+		return PTR_ERR(rockchip->mgmt_rst);
 	}
-	return PCIBIOS_SUCCESSFUL;
-}
-
-static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
-				       struct pci_bus *bus, u32 devfn,
-				       int where, int size, u32 val)
-{
-	u32 busdev;
-
-	busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
-				PCI_FUNC(devfn), where);
-	if (!IS_ALIGNED(busdev, size))
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-
-	if (bus->parent->number == rockchip->root_bus_nr)
-		rockchip_pcie_cfg_configuration_accesses(rockchip,
-						AXI_WRAPPER_TYPE0_CFG);
-	else
-		rockchip_pcie_cfg_configuration_accesses(rockchip,
-						AXI_WRAPPER_TYPE1_CFG);
-
-	if (size == 4)
-		writel(val, rockchip->reg_base + busdev);
-	else if (size == 2)
-		writew(val, rockchip->reg_base + busdev);
-	else if (size == 1)
-		writeb(val, rockchip->reg_base + busdev);
-	else
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-
-	return PCIBIOS_SUCCESSFUL;
-}
 
-static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
-				 int size, u32 *val)
-{
-	struct rockchip_pcie *rockchip = bus->sysdata;
-
-	if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
-		*val = 0xffffffff;
-		return PCIBIOS_DEVICE_NOT_FOUND;
+	rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
+								     "mgmt-sticky");
+	if (IS_ERR(rockchip->mgmt_sticky_rst)) {
+		if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing mgmt-sticky reset property in node\n");
+		return PTR_ERR(rockchip->mgmt_sticky_rst);
 	}
 
-	if (bus->number == rockchip->root_bus_nr)
-		return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
-
-	return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, val);
-}
+	rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
+	if (IS_ERR(rockchip->pipe_rst)) {
+		if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing pipe reset property in node\n");
+		return PTR_ERR(rockchip->pipe_rst);
+	}
 
-static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
-				 int where, int size, u32 val)
-{
-	struct rockchip_pcie *rockchip = bus->sysdata;
+	rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
+	if (IS_ERR(rockchip->pm_rst)) {
+		if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing pm reset property in node\n");
+		return PTR_ERR(rockchip->pm_rst);
+	}
 
-	if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
-		return PCIBIOS_DEVICE_NOT_FOUND;
+	rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
+	if (IS_ERR(rockchip->pclk_rst)) {
+		if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing pclk reset property in node\n");
+		return PTR_ERR(rockchip->pclk_rst);
+	}
 
-	if (bus->number == rockchip->root_bus_nr)
-		return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
+	rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
+	if (IS_ERR(rockchip->aclk_rst)) {
+		if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing aclk reset property in node\n");
+		return PTR_ERR(rockchip->aclk_rst);
+	}
 
-	return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, val);
-}
+	if (rockchip->is_rc) {
+		rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
+		if (IS_ERR(rockchip->ep_gpio)) {
+			dev_err(dev, "missing ep-gpios property in node\n");
+			return PTR_ERR(rockchip->ep_gpio);
+		}
+	}
 
-static struct pci_ops rockchip_pcie_ops = {
-	.read = rockchip_pcie_rd_conf,
-	.write = rockchip_pcie_wr_conf,
-};
+	rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
+	if (IS_ERR(rockchip->aclk_pcie)) {
+		dev_err(dev, "aclk clock not found\n");
+		return PTR_ERR(rockchip->aclk_pcie);
+	}
 
-static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
-{
-	int curr;
-	u32 status, scale, power;
+	rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
+	if (IS_ERR(rockchip->aclk_perf_pcie)) {
+		dev_err(dev, "aclk_perf clock not found\n");
+		return PTR_ERR(rockchip->aclk_perf_pcie);
+	}
 
-	if (IS_ERR(rockchip->vpcie3v3))
-		return;
+	rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
+	if (IS_ERR(rockchip->hclk_pcie)) {
+		dev_err(dev, "hclk clock not found\n");
+		return PTR_ERR(rockchip->hclk_pcie);
+	}
 
-	/*
-	 * Set RC's captured slot power limit and scale if
-	 * vpcie3v3 available. The default values are both zero
-	 * which means the software should set these two according
-	 * to the actual power supply.
-	 */
-	curr = regulator_get_current_limit(rockchip->vpcie3v3);
-	if (curr <= 0)
-		return;
-
-	scale = 3; /* 0.001x */
-	curr = curr / 1000; /* convert to mA */
-	power = (curr * 3300) / 1000; /* milliwatt */
-	while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
-		if (!scale) {
-			dev_warn(rockchip->dev, "invalid power supply\n");
-			return;
-		}
-		scale--;
-		power = power / 10;
+	rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
+	if (IS_ERR(rockchip->clk_pcie_pm)) {
+		dev_err(dev, "pm clock not found\n");
+		return PTR_ERR(rockchip->clk_pcie_pm);
 	}
 
-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
-	status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
-		  (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+	return 0;
 }
+EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt);
 
-/**
- * rockchip_pcie_init_port - Initialize hardware
- * @rockchip: PCIe port information
- */
-static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
+int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
 {
 	struct device *dev = rockchip->dev;
 	int err, i;
-	u32 status;
-
-	gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+	u32 regs;
 
 	err = reset_control_assert(rockchip->aclk_rst);
 	if (err) {
@@ -618,13 +237,15 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
 		rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
 				    PCIE_CLIENT_CONFIG);
 
-	rockchip_pcie_write(rockchip,
-			    PCIE_CLIENT_CONF_ENABLE |
-			    PCIE_CLIENT_LINK_TRAIN_ENABLE |
-			    PCIE_CLIENT_ARI_ENABLE |
-			    PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) |
-			    PCIE_CLIENT_MODE_RC,
-			    PCIE_CLIENT_CONFIG);
+	regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE |
+	       PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes);
+
+	if (rockchip->is_rc)
+		regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
+	else
+		regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP;
+
+	rockchip_pcie_write(rockchip, regs, PCIE_CLIENT_CONFIG);
 
 	for (i = 0; i < MAX_LANE_NUM; i++) {
 		err = phy_power_on(rockchip->phys[i]);
@@ -662,93 +283,6 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
 		goto err_power_off_phy;
 	}
 
-	/* Fix the transmitted FTS count desired to exit from L0s. */
-	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
-	status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
-		 (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
-	rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
-
-	rockchip_pcie_set_power_limit(rockchip);
-
-	/* Set RC's clock architecture as common clock */
-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
-	status |= PCI_EXP_LNKSTA_SLC << 16;
-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-
-	/* Set RC's RCB to 128 */
-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
-	status |= PCI_EXP_LNKCTL_RCB;
-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-
-	/* Enable Gen1 training */
-	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
-			    PCIE_CLIENT_CONFIG);
-
-	gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
-
-	/* 500ms timeout value should be enough for Gen1/2 training */
-	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
-				 status, PCIE_LINK_UP(status), 20,
-				 500 * USEC_PER_MSEC);
-	if (err) {
-		dev_err(dev, "PCIe link training gen1 timeout!\n");
-		goto err_power_off_phy;
-	}
-
-	if (rockchip->link_gen == 2) {
-		/*
-		 * Enable retrain for gen2. This should be configured only after
-		 * gen1 finished.
-		 */
-		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
-		status |= PCI_EXP_LNKCTL_RL;
-		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-
-		err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
-					 status, PCIE_LINK_IS_GEN2(status), 20,
-					 500 * USEC_PER_MSEC);
-		if (err)
-			dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
-	}
-
-	/* Check the final link width from negotiated lane counter from MGMT */
-	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
-	status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
-			  PCIE_CORE_PL_CONF_LANE_SHIFT);
-	dev_dbg(dev, "current link width is x%d\n", status);
-
-	/* Power off unused lane(s) */
-	rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
-	for (i = 0; i < MAX_LANE_NUM; i++) {
-		if (!(rockchip->lanes_map & BIT(i))) {
-			dev_dbg(dev, "idling lane %d\n", i);
-			phy_power_off(rockchip->phys[i]);
-		}
-	}
-
-	rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
-			    PCIE_CORE_CONFIG_VENDOR);
-	rockchip_pcie_write(rockchip,
-			    PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
-			    PCIE_RC_CONFIG_RID_CCR);
-
-	/* Clear THP cap's next cap pointer to remove L1 substate cap */
-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
-	status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
-
-	/* Clear L0s from RC's link cap */
-	if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
-		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
-		status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
-		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
-	}
-
-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
-	status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
-	status |= PCIE_RC_CONFIG_DCSR_MPS_256;
-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
-
 	return 0;
 err_power_off_phy:
 	while (i--)
@@ -759,156 +293,9 @@ err_exit_phy:
 		phy_exit(rockchip->phys[i]);
 	return err;
 }
+EXPORT_SYMBOL_GPL(rockchip_pcie_init_port);
 
-static void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip)
-{
-	int i;
-
-	for (i = 0; i < MAX_LANE_NUM; i++) {
-		/* inactive lanes are already powered off */
-		if (rockchip->lanes_map & BIT(i))
-			phy_power_off(rockchip->phys[i]);
-		phy_exit(rockchip->phys[i]);
-	}
-}
-
-static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
-{
-	struct rockchip_pcie *rockchip = arg;
-	struct device *dev = rockchip->dev;
-	u32 reg;
-	u32 sub_reg;
-
-	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
-	if (reg & PCIE_CLIENT_INT_LOCAL) {
-		dev_dbg(dev, "local interrupt received\n");
-		sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
-		if (sub_reg & PCIE_CORE_INT_PRFPE)
-			dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
-
-		if (sub_reg & PCIE_CORE_INT_CRFPE)
-			dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
-
-		if (sub_reg & PCIE_CORE_INT_RRPE)
-			dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
-
-		if (sub_reg & PCIE_CORE_INT_PRFO)
-			dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
-
-		if (sub_reg & PCIE_CORE_INT_CRFO)
-			dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
-
-		if (sub_reg & PCIE_CORE_INT_RT)
-			dev_dbg(dev, "replay timer timed out\n");
-
-		if (sub_reg & PCIE_CORE_INT_RTR)
-			dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
-
-		if (sub_reg & PCIE_CORE_INT_PE)
-			dev_dbg(dev, "phy error detected on receive side\n");
-
-		if (sub_reg & PCIE_CORE_INT_MTR)
-			dev_dbg(dev, "malformed TLP received from the link\n");
-
-		if (sub_reg & PCIE_CORE_INT_UCR)
-			dev_dbg(dev, "malformed TLP received from the link\n");
-
-		if (sub_reg & PCIE_CORE_INT_FCE)
-			dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
-
-		if (sub_reg & PCIE_CORE_INT_CT)
-			dev_dbg(dev, "a request timed out waiting for completion\n");
-
-		if (sub_reg & PCIE_CORE_INT_UTC)
-			dev_dbg(dev, "unmapped TC error\n");
-
-		if (sub_reg & PCIE_CORE_INT_MMVC)
-			dev_dbg(dev, "MSI mask register changes\n");
-
-		rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
-	} else if (reg & PCIE_CLIENT_INT_PHY) {
-		dev_dbg(dev, "phy link changes\n");
-		rockchip_pcie_update_txcredit_mui(rockchip);
-		rockchip_pcie_clr_bw_int(rockchip);
-	}
-
-	rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
-			    PCIE_CLIENT_INT_STATUS);
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
-{
-	struct rockchip_pcie *rockchip = arg;
-	struct device *dev = rockchip->dev;
-	u32 reg;
-
-	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
-	if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
-		dev_dbg(dev, "legacy done interrupt received\n");
-
-	if (reg & PCIE_CLIENT_INT_MSG)
-		dev_dbg(dev, "message done interrupt received\n");
-
-	if (reg & PCIE_CLIENT_INT_HOT_RST)
-		dev_dbg(dev, "hot reset interrupt received\n");
-
-	if (reg & PCIE_CLIENT_INT_DPA)
-		dev_dbg(dev, "dpa interrupt received\n");
-
-	if (reg & PCIE_CLIENT_INT_FATAL_ERR)
-		dev_dbg(dev, "fatal error interrupt received\n");
-
-	if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
-		dev_dbg(dev, "no fatal error interrupt received\n");
-
-	if (reg & PCIE_CLIENT_INT_CORR_ERR)
-		dev_dbg(dev, "correctable error interrupt received\n");
-
-	if (reg & PCIE_CLIENT_INT_PHY)
-		dev_dbg(dev, "phy interrupt received\n");
-
-	rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
-			      PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
-			      PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
-			      PCIE_CLIENT_INT_NFATAL_ERR |
-			      PCIE_CLIENT_INT_CORR_ERR |
-			      PCIE_CLIENT_INT_PHY),
-		   PCIE_CLIENT_INT_STATUS);
-
-	return IRQ_HANDLED;
-}
-
-static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
-{
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-	struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
-	struct device *dev = rockchip->dev;
-	u32 reg;
-	u32 hwirq;
-	u32 virq;
-
-	chained_irq_enter(chip, desc);
-
-	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
-	reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
-
-	while (reg) {
-		hwirq = ffs(reg) - 1;
-		reg &= ~BIT(hwirq);
-
-		virq = irq_find_mapping(rockchip->irq_domain, hwirq);
-		if (virq)
-			generic_handle_irq(virq);
-		else
-			dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
-	}
-
-	chained_irq_exit(chip, desc);
-}
-
-static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
+int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
 {
 	struct device *dev = rockchip->dev;
 	struct phy *phy;
@@ -948,452 +335,22 @@ static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(rockchip_pcie_get_phys);
 
-static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
+void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip)
 {
-	int irq, err;
-	struct device *dev = rockchip->dev;
-	struct platform_device *pdev = to_platform_device(dev);
-
-	irq = platform_get_irq_byname(pdev, "sys");
-	if (irq < 0) {
-		dev_err(dev, "missing sys IRQ resource\n");
-		return irq;
-	}
-
-	err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
-			       IRQF_SHARED, "pcie-sys", rockchip);
-	if (err) {
-		dev_err(dev, "failed to request PCIe subsystem IRQ\n");
-		return err;
-	}
-
-	irq = platform_get_irq_byname(pdev, "legacy");
-	if (irq < 0) {
-		dev_err(dev, "missing legacy IRQ resource\n");
-		return irq;
-	}
-
-	irq_set_chained_handler_and_data(irq,
-					 rockchip_pcie_legacy_int_handler,
-					 rockchip);
-
-	irq = platform_get_irq_byname(pdev, "client");
-	if (irq < 0) {
-		dev_err(dev, "missing client IRQ resource\n");
-		return irq;
-	}
-
-	err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
-			       IRQF_SHARED, "pcie-client", rockchip);
-	if (err) {
-		dev_err(dev, "failed to request PCIe client IRQ\n");
-		return err;
-	}
-
-	return 0;
-}
-
-/**
- * rockchip_pcie_parse_dt - Parse Device Tree
- * @rockchip: PCIe port information
- *
- * Return: '0' on success and error value on failure
- */
-static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
-{
-	struct device *dev = rockchip->dev;
-	struct platform_device *pdev = to_platform_device(dev);
-	struct device_node *node = dev->of_node;
-	struct resource *regs;
-	int err;
-
-	regs = platform_get_resource_byname(pdev,
-					    IORESOURCE_MEM,
-					    "axi-base");
-	rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
-	if (IS_ERR(rockchip->reg_base))
-		return PTR_ERR(rockchip->reg_base);
-
-	regs = platform_get_resource_byname(pdev,
-					    IORESOURCE_MEM,
-					    "apb-base");
-	rockchip->apb_base = devm_ioremap_resource(dev, regs);
-	if (IS_ERR(rockchip->apb_base))
-		return PTR_ERR(rockchip->apb_base);
-
-	err = rockchip_pcie_get_phys(rockchip);
-	if (err)
-		return err;
-
-	rockchip->lanes = 1;
-	err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
-	if (!err && (rockchip->lanes == 0 ||
-		     rockchip->lanes == 3 ||
-		     rockchip->lanes > 4)) {
-		dev_warn(dev, "invalid num-lanes, default to use one lane\n");
-		rockchip->lanes = 1;
-	}
-
-	rockchip->link_gen = of_pci_get_max_link_speed(node);
-	if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
-		rockchip->link_gen = 2;
-
-	rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
-	if (IS_ERR(rockchip->core_rst)) {
-		if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
-			dev_err(dev, "missing core reset property in node\n");
-		return PTR_ERR(rockchip->core_rst);
-	}
-
-	rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
-	if (IS_ERR(rockchip->mgmt_rst)) {
-		if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
-			dev_err(dev, "missing mgmt reset property in node\n");
-		return PTR_ERR(rockchip->mgmt_rst);
-	}
-
-	rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
-								     "mgmt-sticky");
-	if (IS_ERR(rockchip->mgmt_sticky_rst)) {
-		if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
-			dev_err(dev, "missing mgmt-sticky reset property in node\n");
-		return PTR_ERR(rockchip->mgmt_sticky_rst);
-	}
-
-	rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
-	if (IS_ERR(rockchip->pipe_rst)) {
-		if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
-			dev_err(dev, "missing pipe reset property in node\n");
-		return PTR_ERR(rockchip->pipe_rst);
-	}
-
-	rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
-	if (IS_ERR(rockchip->pm_rst)) {
-		if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
-			dev_err(dev, "missing pm reset property in node\n");
-		return PTR_ERR(rockchip->pm_rst);
-	}
-
-	rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
-	if (IS_ERR(rockchip->pclk_rst)) {
-		if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
-			dev_err(dev, "missing pclk reset property in node\n");
-		return PTR_ERR(rockchip->pclk_rst);
-	}
-
-	rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
-	if (IS_ERR(rockchip->aclk_rst)) {
-		if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
-			dev_err(dev, "missing aclk reset property in node\n");
-		return PTR_ERR(rockchip->aclk_rst);
-	}
-
-	rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
-	if (IS_ERR(rockchip->ep_gpio)) {
-		dev_err(dev, "missing ep-gpios property in node\n");
-		return PTR_ERR(rockchip->ep_gpio);
-	}
-
-	rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
-	if (IS_ERR(rockchip->aclk_pcie)) {
-		dev_err(dev, "aclk clock not found\n");
-		return PTR_ERR(rockchip->aclk_pcie);
-	}
-
-	rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
-	if (IS_ERR(rockchip->aclk_perf_pcie)) {
-		dev_err(dev, "aclk_perf clock not found\n");
-		return PTR_ERR(rockchip->aclk_perf_pcie);
-	}
-
-	rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
-	if (IS_ERR(rockchip->hclk_pcie)) {
-		dev_err(dev, "hclk clock not found\n");
-		return PTR_ERR(rockchip->hclk_pcie);
-	}
-
-	rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
-	if (IS_ERR(rockchip->clk_pcie_pm)) {
-		dev_err(dev, "pm clock not found\n");
-		return PTR_ERR(rockchip->clk_pcie_pm);
-	}
-
-	err = rockchip_pcie_setup_irq(rockchip);
-	if (err)
-		return err;
-
-	rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
-	if (IS_ERR(rockchip->vpcie12v)) {
-		if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		dev_info(dev, "no vpcie12v regulator found\n");
-	}
-
-	rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
-	if (IS_ERR(rockchip->vpcie3v3)) {
-		if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		dev_info(dev, "no vpcie3v3 regulator found\n");
-	}
-
-	rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
-	if (IS_ERR(rockchip->vpcie1v8)) {
-		if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		dev_info(dev, "no vpcie1v8 regulator found\n");
-	}
-
-	rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
-	if (IS_ERR(rockchip->vpcie0v9)) {
-		if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		dev_info(dev, "no vpcie0v9 regulator found\n");
-	}
-
-	return 0;
-}
-
-static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
-{
-	struct device *dev = rockchip->dev;
-	int err;
-
-	if (!IS_ERR(rockchip->vpcie12v)) {
-		err = regulator_enable(rockchip->vpcie12v);
-		if (err) {
-			dev_err(dev, "fail to enable vpcie12v regulator\n");
-			goto err_out;
-		}
-	}
-
-	if (!IS_ERR(rockchip->vpcie3v3)) {
-		err = regulator_enable(rockchip->vpcie3v3);
-		if (err) {
-			dev_err(dev, "fail to enable vpcie3v3 regulator\n");
-			goto err_disable_12v;
-		}
-	}
-
-	if (!IS_ERR(rockchip->vpcie1v8)) {
-		err = regulator_enable(rockchip->vpcie1v8);
-		if (err) {
-			dev_err(dev, "fail to enable vpcie1v8 regulator\n");
-			goto err_disable_3v3;
-		}
-	}
-
-	if (!IS_ERR(rockchip->vpcie0v9)) {
-		err = regulator_enable(rockchip->vpcie0v9);
-		if (err) {
-			dev_err(dev, "fail to enable vpcie0v9 regulator\n");
-			goto err_disable_1v8;
-		}
-	}
-
-	return 0;
-
-err_disable_1v8:
-	if (!IS_ERR(rockchip->vpcie1v8))
-		regulator_disable(rockchip->vpcie1v8);
-err_disable_3v3:
-	if (!IS_ERR(rockchip->vpcie3v3))
-		regulator_disable(rockchip->vpcie3v3);
-err_disable_12v:
-	if (!IS_ERR(rockchip->vpcie12v))
-		regulator_disable(rockchip->vpcie12v);
-err_out:
-	return err;
-}
-
-static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
-{
-	rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
-			    (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
-	rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
-			    PCIE_CORE_INT_MASK);
-
-	rockchip_pcie_enable_bw_int(rockchip);
-}
-
-static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
-				  irq_hw_number_t hwirq)
-{
-	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
-	irq_set_chip_data(irq, domain->host_data);
-
-	return 0;
-}
-
-static const struct irq_domain_ops intx_domain_ops = {
-	.map = rockchip_pcie_intx_map,
-};
-
-static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
-{
-	struct device *dev = rockchip->dev;
-	struct device_node *intc = of_get_next_child(dev->of_node, NULL);
-
-	if (!intc) {
-		dev_err(dev, "missing child interrupt-controller node\n");
-		return -EINVAL;
-	}
-
-	rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
-						    &intx_domain_ops, rockchip);
-	if (!rockchip->irq_domain) {
-		dev_err(dev, "failed to get a INTx IRQ domain\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
-				     int region_no, int type, u8 num_pass_bits,
-				     u32 lower_addr, u32 upper_addr)
-{
-	u32 ob_addr_0;
-	u32 ob_addr_1;
-	u32 ob_desc_0;
-	u32 aw_offset;
-
-	if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
-		return -EINVAL;
-	if (num_pass_bits + 1 < 8)
-		return -EINVAL;
-	if (num_pass_bits > 63)
-		return -EINVAL;
-	if (region_no == 0) {
-		if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
-			return -EINVAL;
-	}
-	if (region_no != 0) {
-		if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
-			return -EINVAL;
-	}
-
-	aw_offset = (region_no << OB_REG_SIZE_SHIFT);
-
-	ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
-	ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
-	ob_addr_1 = upper_addr;
-	ob_desc_0 = (1 << 23 | type);
-
-	rockchip_pcie_write(rockchip, ob_addr_0,
-			    PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
-	rockchip_pcie_write(rockchip, ob_addr_1,
-			    PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
-	rockchip_pcie_write(rockchip, ob_desc_0,
-			    PCIE_CORE_OB_REGION_DESC0 + aw_offset);
-	rockchip_pcie_write(rockchip, 0,
-			    PCIE_CORE_OB_REGION_DESC1 + aw_offset);
-
-	return 0;
-}
-
-static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
-				     int region_no, u8 num_pass_bits,
-				     u32 lower_addr, u32 upper_addr)
-{
-	u32 ib_addr_0;
-	u32 ib_addr_1;
-	u32 aw_offset;
-
-	if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
-		return -EINVAL;
-	if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
-		return -EINVAL;
-	if (num_pass_bits > 63)
-		return -EINVAL;
-
-	aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
-
-	ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
-	ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
-	ib_addr_1 = upper_addr;
-
-	rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
-	rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
-
-	return 0;
-}
-
-static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
-{
-	struct device *dev = rockchip->dev;
-	int offset;
-	int err;
-	int reg_no;
-
-	rockchip_pcie_cfg_configuration_accesses(rockchip,
-						 AXI_WRAPPER_TYPE0_CFG);
-
-	for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
-		err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
-						AXI_WRAPPER_MEM_WRITE,
-						20 - 1,
-						rockchip->mem_bus_addr +
-						(reg_no << 20),
-						0);
-		if (err) {
-			dev_err(dev, "program RC mem outbound ATU failed\n");
-			return err;
-		}
-	}
-
-	err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
-	if (err) {
-		dev_err(dev, "program RC mem inbound ATU failed\n");
-		return err;
-	}
-
-	offset = rockchip->mem_size >> 20;
-	for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
-		err = rockchip_pcie_prog_ob_atu(rockchip,
-						reg_no + 1 + offset,
-						AXI_WRAPPER_IO_WRITE,
-						20 - 1,
-						rockchip->io_bus_addr +
-						(reg_no << 20),
-						0);
-		if (err) {
-			dev_err(dev, "program RC io outbound ATU failed\n");
-			return err;
-		}
-	}
-
-	/* assign message regions */
-	rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
-				  AXI_WRAPPER_NOR_MSG,
-				  20 - 1, 0, 0);
-
-	rockchip->msg_bus_addr = rockchip->mem_bus_addr +
-					((reg_no + offset) << 20);
-	return err;
-}
-
-static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
-{
-	u32 value;
-	int err;
-
-	/* send PME_TURN_OFF message */
-	writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
+	int i;
 
-	/* read LTSSM and wait for falling into L2 link state */
-	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
-				 value, PCIE_LINK_IS_L2(value), 20,
-				 jiffies_to_usecs(5 * HZ));
-	if (err) {
-		dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
-		return err;
+	for (i = 0; i < MAX_LANE_NUM; i++) {
+		/* inactive lanes are already powered off */
+		if (rockchip->lanes_map & BIT(i))
+			phy_power_off(rockchip->phys[i]);
+		phy_exit(rockchip->phys[i]);
 	}
-
-	return 0;
 }
+EXPORT_SYMBOL_GPL(rockchip_pcie_deinit_phys);
 
-static int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
+int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
 {
 	struct device *dev = rockchip->dev;
 	int err;
@@ -1432,8 +389,9 @@ err_aclk_perf_pcie:
 	clk_disable_unprepare(rockchip->aclk_pcie);
 	return err;
 }
+EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
 
-static void rockchip_pcie_disable_clocks(void *data)
+void rockchip_pcie_disable_clocks(void *data)
 {
 	struct rockchip_pcie *rockchip = data;
 
@@ -1442,267 +400,25 @@ static void rockchip_pcie_disable_clocks(void *data)
 	clk_disable_unprepare(rockchip->aclk_perf_pcie);
 	clk_disable_unprepare(rockchip->aclk_pcie);
 }
+EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
 
-static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
-{
-	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
-	int ret;
-
-	/* disable core and cli int since we don't need to ack PME_ACK */
-	rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
-			    PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
-	rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);
-
-	ret = rockchip_pcie_wait_l2(rockchip);
-	if (ret) {
-		rockchip_pcie_enable_interrupts(rockchip);
-		return ret;
-	}
-
-	rockchip_pcie_deinit_phys(rockchip);
-
-	rockchip_pcie_disable_clocks(rockchip);
-
-	if (!IS_ERR(rockchip->vpcie0v9))
-		regulator_disable(rockchip->vpcie0v9);
-
-	return ret;
-}
-
-static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
-{
-	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
-	int err;
-
-	if (!IS_ERR(rockchip->vpcie0v9)) {
-		err = regulator_enable(rockchip->vpcie0v9);
-		if (err) {
-			dev_err(dev, "fail to enable vpcie0v9 regulator\n");
-			return err;
-		}
-	}
-
-	err = rockchip_pcie_enable_clocks(rockchip);
-	if (err)
-		goto err_disable_0v9;
-
-	err = rockchip_pcie_init_port(rockchip);
-	if (err)
-		goto err_pcie_resume;
-
-	err = rockchip_pcie_cfg_atu(rockchip);
-	if (err)
-		goto err_err_deinit_port;
-
-	/* Need this to enter L1 again */
-	rockchip_pcie_update_txcredit_mui(rockchip);
-	rockchip_pcie_enable_interrupts(rockchip);
-
-	return 0;
-
-err_err_deinit_port:
-	rockchip_pcie_deinit_phys(rockchip);
-err_pcie_resume:
-	rockchip_pcie_disable_clocks(rockchip);
-err_disable_0v9:
-	if (!IS_ERR(rockchip->vpcie0v9))
-		regulator_disable(rockchip->vpcie0v9);
-	return err;
-}
-
-static int rockchip_pcie_probe(struct platform_device *pdev)
-{
-	struct rockchip_pcie *rockchip;
-	struct device *dev = &pdev->dev;
-	struct pci_bus *bus, *child;
-	struct pci_host_bridge *bridge;
-	struct resource_entry *win;
-	resource_size_t io_base;
-	struct resource	*mem;
-	struct resource	*io;
-	int err;
-
-	LIST_HEAD(res);
-
-	if (!dev->of_node)
-		return -ENODEV;
-
-	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
-	if (!bridge)
-		return -ENOMEM;
-
-	rockchip = pci_host_bridge_priv(bridge);
-
-	platform_set_drvdata(pdev, rockchip);
-	rockchip->dev = dev;
-
-	err = rockchip_pcie_parse_dt(rockchip);
-	if (err)
-		return err;
-
-	err = rockchip_pcie_enable_clocks(rockchip);
-	if (err)
-		return err;
-
-	err = rockchip_pcie_set_vpcie(rockchip);
-	if (err) {
-		dev_err(dev, "failed to set vpcie regulator\n");
-		goto err_set_vpcie;
-	}
-
-	err = rockchip_pcie_init_port(rockchip);
-	if (err)
-		goto err_vpcie;
-
-	rockchip_pcie_enable_interrupts(rockchip);
-
-	err = rockchip_pcie_init_irq_domain(rockchip);
-	if (err < 0)
-		goto err_deinit_port;
-
-	err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
-					       &res, &io_base);
-	if (err)
-		goto err_remove_irq_domain;
-
-	err = devm_request_pci_bus_resources(dev, &res);
-	if (err)
-		goto err_free_res;
-
-	/* Get the I/O and memory ranges from DT */
-	resource_list_for_each_entry(win, &res) {
-		switch (resource_type(win->res)) {
-		case IORESOURCE_IO:
-			io = win->res;
-			io->name = "I/O";
-			rockchip->io_size = resource_size(io);
-			rockchip->io_bus_addr = io->start - win->offset;
-			err = pci_remap_iospace(io, io_base);
-			if (err) {
-				dev_warn(dev, "error %d: failed to map resource %pR\n",
-					 err, io);
-				continue;
-			}
-			rockchip->io = io;
-			break;
-		case IORESOURCE_MEM:
-			mem = win->res;
-			mem->name = "MEM";
-			rockchip->mem_size = resource_size(mem);
-			rockchip->mem_bus_addr = mem->start - win->offset;
-			break;
-		case IORESOURCE_BUS:
-			rockchip->root_bus_nr = win->res->start;
-			break;
-		default:
-			continue;
-		}
-	}
-
-	err = rockchip_pcie_cfg_atu(rockchip);
-	if (err)
-		goto err_unmap_iospace;
-
-	rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
-	if (!rockchip->msg_region) {
-		err = -ENOMEM;
-		goto err_unmap_iospace;
-	}
-
-	list_splice_init(&res, &bridge->windows);
-	bridge->dev.parent = dev;
-	bridge->sysdata = rockchip;
-	bridge->busnr = 0;
-	bridge->ops = &rockchip_pcie_ops;
-	bridge->map_irq = of_irq_parse_and_map_pci;
-	bridge->swizzle_irq = pci_common_swizzle;
-
-	err = pci_scan_root_bus_bridge(bridge);
-	if (err < 0)
-		goto err_unmap_iospace;
-
-	bus = bridge->bus;
-
-	rockchip->root_bus = bus;
-
-	pci_bus_size_bridges(bus);
-	pci_bus_assign_resources(bus);
-	list_for_each_entry(child, &bus->children, node)
-		pcie_bus_configure_settings(child);
-
-	pci_bus_add_devices(bus);
-	return 0;
-
-err_unmap_iospace:
-	pci_unmap_iospace(rockchip->io);
-err_free_res:
-	pci_free_resource_list(&res);
-err_remove_irq_domain:
-	irq_domain_remove(rockchip->irq_domain);
-err_deinit_port:
-	rockchip_pcie_deinit_phys(rockchip);
-err_vpcie:
-	if (!IS_ERR(rockchip->vpcie12v))
-		regulator_disable(rockchip->vpcie12v);
-	if (!IS_ERR(rockchip->vpcie3v3))
-		regulator_disable(rockchip->vpcie3v3);
-	if (!IS_ERR(rockchip->vpcie1v8))
-		regulator_disable(rockchip->vpcie1v8);
-	if (!IS_ERR(rockchip->vpcie0v9))
-		regulator_disable(rockchip->vpcie0v9);
-err_set_vpcie:
-	rockchip_pcie_disable_clocks(rockchip);
-	return err;
-}
-
-static int rockchip_pcie_remove(struct platform_device *pdev)
+void rockchip_pcie_cfg_configuration_accesses(
+		struct rockchip_pcie *rockchip, u32 type)
 {
-	struct device *dev = &pdev->dev;
-	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
-
-	pci_stop_root_bus(rockchip->root_bus);
-	pci_remove_root_bus(rockchip->root_bus);
-	pci_unmap_iospace(rockchip->io);
-	irq_domain_remove(rockchip->irq_domain);
-
-	rockchip_pcie_deinit_phys(rockchip);
-
-	rockchip_pcie_disable_clocks(rockchip);
+	u32 ob_desc_0;
 
-	if (!IS_ERR(rockchip->vpcie12v))
-		regulator_disable(rockchip->vpcie12v);
-	if (!IS_ERR(rockchip->vpcie3v3))
-		regulator_disable(rockchip->vpcie3v3);
-	if (!IS_ERR(rockchip->vpcie1v8))
-		regulator_disable(rockchip->vpcie1v8);
-	if (!IS_ERR(rockchip->vpcie0v9))
-		regulator_disable(rockchip->vpcie0v9);
+	/* Configuration Accesses for region 0 */
+	rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
 
-	return 0;
+	rockchip_pcie_write(rockchip,
+			    (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
+			    PCIE_CORE_OB_REGION_ADDR0);
+	rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
+			    PCIE_CORE_OB_REGION_ADDR1);
+	ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
+	ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
+	ob_desc_0 |= (type | (0x1 << 23));
+	rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
+	rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
 }
-
-static const struct dev_pm_ops rockchip_pcie_pm_ops = {
-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
-				      rockchip_pcie_resume_noirq)
-};
-
-static const struct of_device_id rockchip_pcie_of_match[] = {
-	{ .compatible = "rockchip,rk3399-pcie", },
-	{}
-};
-MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
-
-static struct platform_driver rockchip_pcie_driver = {
-	.driver = {
-		.name = "rockchip-pcie",
-		.of_match_table = rockchip_pcie_of_match,
-		.pm = &rockchip_pcie_pm_ops,
-	},
-	.probe = rockchip_pcie_probe,
-	.remove = rockchip_pcie_remove,
-};
-module_platform_driver(rockchip_pcie_driver);
-
-MODULE_AUTHOR("Rockchip Inc");
-MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
-MODULE_LICENSE("GPL v2");
+EXPORT_SYMBOL_GPL(rockchip_pcie_cfg_configuration_accesses);
diff --git a/drivers/pci/host/pcie-rockchip.h b/drivers/pci/host/pcie-rockchip.h
new file mode 100644
index 000000000000..8e87a059ce73
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip.h
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe controller driver
+ *
+ * Copyright (c) 2018 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *
+ */
+
+#ifndef _PCIE_ROCKCHIP_H
+#define _PCIE_ROCKCHIP_H
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+/*
+ * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
+ * bits.  This allows atomic updates of the register without locking.
+ */
+#define HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val)		HIWORD_UPDATE(val, val)
+
+#define ENCODE_LANES(x)			((((x) >> 1) & 3) << 4)
+#define MAX_LANE_NUM			4
+#define MAX_REGION_LIMIT		32
+#define MIN_EP_APERTURE			28
+
+#define PCIE_CLIENT_BASE		0x0
+#define PCIE_CLIENT_CONFIG		(PCIE_CLIENT_BASE + 0x00)
+#define   PCIE_CLIENT_CONF_ENABLE	  HIWORD_UPDATE_BIT(0x0001)
+#define   PCIE_CLIENT_CONF_DISABLE       HIWORD_UPDATE(0x0001, 0)
+#define   PCIE_CLIENT_LINK_TRAIN_ENABLE	  HIWORD_UPDATE_BIT(0x0002)
+#define   PCIE_CLIENT_ARI_ENABLE	  HIWORD_UPDATE_BIT(0x0008)
+#define   PCIE_CLIENT_CONF_LANE_NUM(x)	  HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
+#define   PCIE_CLIENT_MODE_RC		  HIWORD_UPDATE_BIT(0x0040)
+#define   PCIE_CLIENT_MODE_EP            HIWORD_UPDATE(0x0040, 0)
+#define   PCIE_CLIENT_GEN_SEL_1		  HIWORD_UPDATE(0x0080, 0)
+#define   PCIE_CLIENT_GEN_SEL_2		  HIWORD_UPDATE_BIT(0x0080)
+#define PCIE_CLIENT_DEBUG_OUT_0		(PCIE_CLIENT_BASE + 0x3c)
+#define   PCIE_CLIENT_DEBUG_LTSSM_MASK		GENMASK(5, 0)
+#define   PCIE_CLIENT_DEBUG_LTSSM_L1		0x18
+#define   PCIE_CLIENT_DEBUG_LTSSM_L2		0x19
+#define PCIE_CLIENT_BASIC_STATUS1	(PCIE_CLIENT_BASE + 0x48)
+#define   PCIE_CLIENT_LINK_STATUS_UP		0x00300000
+#define   PCIE_CLIENT_LINK_STATUS_MASK		0x00300000
+#define PCIE_CLIENT_INT_MASK		(PCIE_CLIENT_BASE + 0x4c)
+#define PCIE_CLIENT_INT_STATUS		(PCIE_CLIENT_BASE + 0x50)
+#define   PCIE_CLIENT_INTR_MASK			GENMASK(8, 5)
+#define   PCIE_CLIENT_INTR_SHIFT		5
+#define   PCIE_CLIENT_INT_LEGACY_DONE		BIT(15)
+#define   PCIE_CLIENT_INT_MSG			BIT(14)
+#define   PCIE_CLIENT_INT_HOT_RST		BIT(13)
+#define   PCIE_CLIENT_INT_DPA			BIT(12)
+#define   PCIE_CLIENT_INT_FATAL_ERR		BIT(11)
+#define   PCIE_CLIENT_INT_NFATAL_ERR		BIT(10)
+#define   PCIE_CLIENT_INT_CORR_ERR		BIT(9)
+#define   PCIE_CLIENT_INT_INTD			BIT(8)
+#define   PCIE_CLIENT_INT_INTC			BIT(7)
+#define   PCIE_CLIENT_INT_INTB			BIT(6)
+#define   PCIE_CLIENT_INT_INTA			BIT(5)
+#define   PCIE_CLIENT_INT_LOCAL			BIT(4)
+#define   PCIE_CLIENT_INT_UDMA			BIT(3)
+#define   PCIE_CLIENT_INT_PHY			BIT(2)
+#define   PCIE_CLIENT_INT_HOT_PLUG		BIT(1)
+#define   PCIE_CLIENT_INT_PWR_STCG		BIT(0)
+
+#define PCIE_CLIENT_INT_LEGACY \
+	(PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
+	PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
+
+#define PCIE_CLIENT_INT_CLI \
+	(PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
+	PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
+	PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
+	PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
+	PCIE_CLIENT_INT_PHY)
+
+#define PCIE_CORE_CTRL_MGMT_BASE	0x900000
+#define PCIE_CORE_CTRL			(PCIE_CORE_CTRL_MGMT_BASE + 0x000)
+#define   PCIE_CORE_PL_CONF_SPEED_5G		0x00000008
+#define   PCIE_CORE_PL_CONF_SPEED_MASK		0x00000018
+#define   PCIE_CORE_PL_CONF_LANE_MASK		0x00000006
+#define   PCIE_CORE_PL_CONF_LANE_SHIFT		1
+#define PCIE_CORE_CTRL_PLC1		(PCIE_CORE_CTRL_MGMT_BASE + 0x004)
+#define   PCIE_CORE_CTRL_PLC1_FTS_MASK		GENMASK(23, 8)
+#define   PCIE_CORE_CTRL_PLC1_FTS_SHIFT		8
+#define   PCIE_CORE_CTRL_PLC1_FTS_CNT		0xffff
+#define PCIE_CORE_TXCREDIT_CFG1		(PCIE_CORE_CTRL_MGMT_BASE + 0x020)
+#define   PCIE_CORE_TXCREDIT_CFG1_MUI_MASK	0xFFFF0000
+#define   PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT	16
+#define   PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
+		(((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
+#define PCIE_CORE_LANE_MAP             (PCIE_CORE_CTRL_MGMT_BASE + 0x200)
+#define   PCIE_CORE_LANE_MAP_MASK              0x0000000f
+#define   PCIE_CORE_LANE_MAP_REVERSE           BIT(16)
+#define PCIE_CORE_INT_STATUS		(PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
+#define   PCIE_CORE_INT_PRFPE			BIT(0)
+#define   PCIE_CORE_INT_CRFPE			BIT(1)
+#define   PCIE_CORE_INT_RRPE			BIT(2)
+#define   PCIE_CORE_INT_PRFO			BIT(3)
+#define   PCIE_CORE_INT_CRFO			BIT(4)
+#define   PCIE_CORE_INT_RT			BIT(5)
+#define   PCIE_CORE_INT_RTR			BIT(6)
+#define   PCIE_CORE_INT_PE			BIT(7)
+#define   PCIE_CORE_INT_MTR			BIT(8)
+#define   PCIE_CORE_INT_UCR			BIT(9)
+#define   PCIE_CORE_INT_FCE			BIT(10)
+#define   PCIE_CORE_INT_CT			BIT(11)
+#define   PCIE_CORE_INT_UTC			BIT(18)
+#define   PCIE_CORE_INT_MMVC			BIT(19)
+#define PCIE_CORE_CONFIG_VENDOR		(PCIE_CORE_CTRL_MGMT_BASE + 0x44)
+#define PCIE_CORE_INT_MASK		(PCIE_CORE_CTRL_MGMT_BASE + 0x210)
+#define PCIE_CORE_PHY_FUNC_CFG		(PCIE_CORE_CTRL_MGMT_BASE + 0x2c0)
+#define PCIE_RC_BAR_CONF		(PCIE_CORE_CTRL_MGMT_BASE + 0x300)
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED		0x0
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS		0x1
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS		0x4
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS	0x5
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS		0x6
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS	0x7
+
+#define PCIE_CORE_INT \
+		(PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
+		 PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
+		 PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
+		 PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
+		 PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
+		 PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
+		 PCIE_CORE_INT_MMVC)
+
+#define PCIE_RC_RP_ATS_BASE		0x400000
+#define PCIE_RC_CONFIG_NORMAL_BASE	0x800000
+#define PCIE_RC_CONFIG_BASE		0xa00000
+#define PCIE_RC_CONFIG_RID_CCR		(PCIE_RC_CONFIG_BASE + 0x08)
+#define   PCIE_RC_CONFIG_SCC_SHIFT		16
+#define PCIE_RC_CONFIG_DCR		(PCIE_RC_CONFIG_BASE + 0xc4)
+#define   PCIE_RC_CONFIG_DCR_CSPL_SHIFT		18
+#define   PCIE_RC_CONFIG_DCR_CSPL_LIMIT		0xff
+#define   PCIE_RC_CONFIG_DCR_CPLS_SHIFT		26
+#define PCIE_RC_CONFIG_DCSR		(PCIE_RC_CONFIG_BASE + 0xc8)
+#define   PCIE_RC_CONFIG_DCSR_MPS_MASK		GENMASK(7, 5)
+#define   PCIE_RC_CONFIG_DCSR_MPS_256		(0x1 << 5)
+#define PCIE_RC_CONFIG_LINK_CAP		(PCIE_RC_CONFIG_BASE + 0xcc)
+#define   PCIE_RC_CONFIG_LINK_CAP_L0S		BIT(10)
+#define PCIE_RC_CONFIG_LCS		(PCIE_RC_CONFIG_BASE + 0xd0)
+#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
+#define PCIE_RC_CONFIG_THP_CAP		(PCIE_RC_CONFIG_BASE + 0x274)
+#define   PCIE_RC_CONFIG_THP_CAP_NEXT_MASK	GENMASK(31, 20)
+
+#define PCIE_CORE_AXI_CONF_BASE		0xc00000
+#define PCIE_CORE_OB_REGION_ADDR0	(PCIE_CORE_AXI_CONF_BASE + 0x0)
+#define   PCIE_CORE_OB_REGION_ADDR0_NUM_BITS	0x3f
+#define   PCIE_CORE_OB_REGION_ADDR0_LO_ADDR	0xffffff00
+#define PCIE_CORE_OB_REGION_ADDR1	(PCIE_CORE_AXI_CONF_BASE + 0x4)
+#define PCIE_CORE_OB_REGION_DESC0	(PCIE_CORE_AXI_CONF_BASE + 0x8)
+#define PCIE_CORE_OB_REGION_DESC1	(PCIE_CORE_AXI_CONF_BASE + 0xc)
+
+#define PCIE_CORE_AXI_INBOUND_BASE	0xc00800
+#define PCIE_RP_IB_ADDR0		(PCIE_CORE_AXI_INBOUND_BASE + 0x0)
+#define   PCIE_CORE_IB_REGION_ADDR0_NUM_BITS	0x3f
+#define   PCIE_CORE_IB_REGION_ADDR0_LO_ADDR	0xffffff00
+#define PCIE_RP_IB_ADDR1		(PCIE_CORE_AXI_INBOUND_BASE + 0x4)
+
+/* Size of one AXI Region (not Region 0) */
+#define AXI_REGION_SIZE				BIT(20)
+/* Size of Region 0, equal to sum of sizes of other regions */
+#define AXI_REGION_0_SIZE			(32 * (0x1 << 20))
+#define OB_REG_SIZE_SHIFT			5
+#define IB_ROOT_PORT_REG_SIZE_SHIFT		3
+#define AXI_WRAPPER_IO_WRITE			0x6
+#define AXI_WRAPPER_MEM_WRITE			0x2
+#define AXI_WRAPPER_TYPE0_CFG			0xa
+#define AXI_WRAPPER_TYPE1_CFG			0xb
+#define AXI_WRAPPER_NOR_MSG			0xc
+
+#define MAX_AXI_IB_ROOTPORT_REGION_NUM		3
+#define MIN_AXI_ADDR_BITS_PASSED		8
+#define PCIE_RC_SEND_PME_OFF			0x11960
+#define ROCKCHIP_VENDOR_ID			0x1d87
+#define PCIE_ECAM_BUS(x)			(((x) & 0xff) << 20)
+#define PCIE_ECAM_DEV(x)			(((x) & 0x1f) << 15)
+#define PCIE_ECAM_FUNC(x)			(((x) & 0x7) << 12)
+#define PCIE_ECAM_REG(x)			(((x) & 0xfff) << 0)
+#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
+	  (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
+	   PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
+#define PCIE_LINK_IS_L2(x) \
+	(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
+#define PCIE_LINK_UP(x) \
+	(((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
+#define PCIE_LINK_IS_GEN2(x) \
+	(((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G)
+
+#define RC_REGION_0_ADDR_TRANS_H		0x00000000
+#define RC_REGION_0_ADDR_TRANS_L		0x00000000
+#define RC_REGION_0_PASS_BITS			(25 - 1)
+#define RC_REGION_0_TYPE_MASK			GENMASK(3, 0)
+#define MAX_AXI_WRAPPER_REGION_NUM		33
+
+#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC		0x0
+#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR		0x1
+#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID		0x2
+#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST		0x3
+#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX		0x4
+#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK		0x5
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA		0x20
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB		0x21
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC		0x22
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD		0x23
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA		0x24
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB		0x25
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC		0x26
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD		0x27
+#define ROCKCHIP_PCIE_MSG_ROUTING_MASK			GENMASK(7, 5)
+#define ROCKCHIP_PCIE_MSG_ROUTING(route) \
+	(((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK)
+#define ROCKCHIP_PCIE_MSG_CODE_MASK			GENMASK(15, 8)
+#define ROCKCHIP_PCIE_MSG_CODE(code) \
+	(((code) << 8) & ROCKCHIP_PCIE_MSG_CODE_MASK)
+#define ROCKCHIP_PCIE_MSG_NO_DATA			BIT(16)
+
+#define ROCKCHIP_PCIE_EP_CMD_STATUS			0x4
+#define   ROCKCHIP_PCIE_EP_CMD_STATUS_IS		BIT(19)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_REG			0x90
+#define   ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET		17
+#define   ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK		GENMASK(19, 17)
+#define   ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET		20
+#define   ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK		GENMASK(22, 20)
+#define   ROCKCHIP_PCIE_EP_MSI_CTRL_ME				BIT(16)
+#define   ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP	BIT(24)
+#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR				0x1
+#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR		0x3
+#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn)	(((fn) << 12) & GENMASK(19, 12))
+#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+	(PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
+#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
+	(PCIE_RC_RP_ATS_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
+	(PCIE_RC_RP_ATS_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK	GENMASK(19, 12)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+	(((devfn) << 12) & \
+		 ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK	GENMASK(27, 20)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+		(((bus) << 20) & ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
+		(PCIE_RC_RP_ATS_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID	BIT(23)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK	GENMASK(31, 24)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
+		(((devfn) << 24) & ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r) \
+		(PCIE_RC_RP_ATS_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r)	\
+		(PCIE_RC_RP_ATS_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
+		(PCIE_RC_RP_ATS_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
+		(PCIE_RC_RP_ATS_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
+
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn) \
+		(PCIE_CORE_CTRL_MGMT_BASE + 0x0240 + (fn) * 0x0008)
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn) \
+		(PCIE_CORE_CTRL_MGMT_BASE + 0x0244 + (fn) * 0x0008)
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
+		(GENMASK(4, 0) << ((b) * 8))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
+		(((a) << ((b) * 8)) & \
+		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
+		(GENMASK(7, 5) << ((b) * 8))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
+		(((c) << ((b) * 8 + 5)) & \
+		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+
+struct rockchip_pcie {
+	void	__iomem *reg_base;		/* DT axi-base */
+	void	__iomem *apb_base;		/* DT apb-base */
+	bool    legacy_phy;
+	struct  phy *phys[MAX_LANE_NUM];
+	struct	reset_control *core_rst;
+	struct	reset_control *mgmt_rst;
+	struct	reset_control *mgmt_sticky_rst;
+	struct	reset_control *pipe_rst;
+	struct	reset_control *pm_rst;
+	struct	reset_control *aclk_rst;
+	struct	reset_control *pclk_rst;
+	struct	clk *aclk_pcie;
+	struct	clk *aclk_perf_pcie;
+	struct	clk *hclk_pcie;
+	struct	clk *clk_pcie_pm;
+	struct	regulator *vpcie12v; /* 12V power supply */
+	struct	regulator *vpcie3v3; /* 3.3V power supply */
+	struct	regulator *vpcie1v8; /* 1.8V power supply */
+	struct	regulator *vpcie0v9; /* 0.9V power supply */
+	struct	gpio_desc *ep_gpio;
+	u32	lanes;
+	u8      lanes_map;
+	u8	root_bus_nr;
+	int	link_gen;
+	struct	device *dev;
+	struct	irq_domain *irq_domain;
+	int     offset;
+	struct pci_bus *root_bus;
+	struct resource *io;
+	phys_addr_t io_bus_addr;
+	u32     io_size;
+	void    __iomem *msg_region;
+	u32     mem_size;
+	phys_addr_t msg_bus_addr;
+	phys_addr_t mem_bus_addr;
+	bool is_rc;
+	struct resource *mem_res;
+};
+
+static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
+{
+	return readl(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
+				u32 reg)
+{
+	writel(val, rockchip->apb_base + reg);
+}
+
+int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip);
+int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
+int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
+void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
+int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
+void rockchip_pcie_disable_clocks(void *data);
+void rockchip_pcie_cfg_configuration_accesses(
+		struct rockchip_pcie *rockchip, u32 type);
+
+#endif /* _PCIE_ROCKCHIP_H */
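A minimal stand-alone sketch (outside the patch) of the write-mask rule described in the header comment above: the upper 16 bits of PCIE_CLIENT_CONFIG select which of the lower 16 bits a write actually modifies, so a single write updates one field without a read-modify-write cycle. The apply_hiword_write() helper is hypothetical and only models the hardware behaviour:

/*
 * Stand-alone model of the PCIE_CLIENT_CONFIG write-mask scheme: only
 * bits whose corresponding high-word mask bit is set are changed by a
 * write, so independent fields can be updated without locking.
 */
#include <stdio.h>
#include <stdint.h>

#define HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
#define HIWORD_UPDATE_BIT(val)		HIWORD_UPDATE(val, val)

/* Hypothetical model of how the hardware applies a masked write. */
static uint16_t apply_hiword_write(uint16_t reg, uint32_t write)
{
	uint16_t mask = write >> 16;
	uint16_t val  = write & 0xffff;

	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t cfg = 0x0000;

	/* Enable link training (bit 1) without touching other bits. */
	cfg = apply_hiword_write(cfg, HIWORD_UPDATE_BIT(0x0002));
	/* Select RC mode (set bit 6). */
	cfg = apply_hiword_write(cfg, HIWORD_UPDATE_BIT(0x0040));
	/* Switch to EP mode (clear bit 6) while leaving bit 1 alone. */
	cfg = apply_hiword_write(cfg, HIWORD_UPDATE(0x0040, 0));

	printf("final PCIE_CLIENT_CONFIG low word: 0x%04x\n", cfg); /* 0x0002 */
	return 0;
}

Running this prints 0x0002: bit 6 toggles with each masked write, while bit 1, whose mask bit is clear in the last write, stays untouched.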
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 4839ae578711..6a4bbb5b3de0 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -21,6 +21,8 @@
 #include <linux/platform_device.h>
 #include <linux/irqchip/chained_irq.h>
 
+#include "../pci.h"
+
 /* Bridge core config registers */
 #define BRCFG_PCIE_RX0			0x00000000
 #define BRCFG_INTERRUPT			0x00000010
@@ -825,7 +827,6 @@ static const struct of_device_id nwl_pcie_of_match[] = {
 static int nwl_pcie_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct device_node *node = dev->of_node;
 	struct nwl_pcie *pcie;
 	struct pci_bus *bus;
 	struct pci_bus *child;
@@ -855,7 +856,8 @@ static int nwl_pcie_probe(struct platform_device *pdev)
 		return err;
 	}
 
-	err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+						    &iobase);
 	if (err) {
 		dev_err(dev, "Getting bridge resources failed\n");
 		return err;
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 0ad188effc09..b110a3a814e3 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -23,6 +23,8 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 
+#include "../pci.h"
+
 /* Register definitions */
 #define XILINX_PCIE_REG_BIR		0x00000130
 #define XILINX_PCIE_REG_IDR		0x00000138
@@ -643,8 +645,8 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
 		return err;
 	}
 
-	err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, &res,
-					       &iobase);
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+						    &iobase);
 	if (err) {
 		dev_err(dev, "Getting bridge resources failed\n");
 		return err;
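Both Xilinx conversions above follow the same pattern: the managed devm_of_pci_get_host_bridge_resources() takes the struct device itself rather than its of_node and ties the parsed resource list's lifetime to the device, which is why the explicit node local in nwl_pcie_probe() can go away. A hedged sketch of the resulting probe-time call pattern (the pcie_example_* names are hypothetical, not from the patch):

#include <linux/of_pci.h>
#include <linux/platform_device.h>

static int pcie_example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	resource_size_t iobase = 0;
	LIST_HEAD(res);
	int err;

	/*
	 * The devm_ variant releases the parsed resource list
	 * automatically on driver detach, so the explicit
	 * pci_free_resource_list() calls in error/unbind paths are
	 * no longer needed.
	 */
	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
						    &iobase);
	if (err)
		return err;

	/* ... request the resources and scan the root bus as before ... */
	return 0;
}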
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index 930a8fa08bd6..942b64fc7f1f 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -24,6 +24,28 @@
 #define VMD_MEMBAR1	2
 #define VMD_MEMBAR2	4
 
+#define PCI_REG_VMCAP		0x40
+#define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
+#define PCI_REG_VMCONFIG	0x44
+#define BUS_RESTRICT_CFG(vmcfg)	((vmcfg >> 8) & 0x3)
+#define PCI_REG_VMLOCK		0x70
+#define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)
+
+enum vmd_features {
+	/*
+	 * Device may contain registers which hint the physical location of the
+	 * membars, in order to allow proper address translation during
+	 * resource assignment to enable guest virtualization
+	 */
+	VMD_FEAT_HAS_MEMBAR_SHADOW	= (1 << 0),
+
+	/*
+	 * Device may provide root port configuration information which limits
+	 * bus numbering
+	 */
+	VMD_FEAT_HAS_BUS_RESTRICTIONS	= (1 << 1),
+};
+
 /*
  * Lock for manipulating VMD IRQ lists.
  */
@@ -546,7 +568,7 @@ static int vmd_find_free_domain(void)
 	return domain + 1;
 }
 
-static int vmd_enable_domain(struct vmd_dev *vmd)
+static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 {
 	struct pci_sysdata *sd = &vmd->sysdata;
 	struct fwnode_handle *fn;
@@ -554,12 +576,57 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
 	u32 upper_bits;
 	unsigned long flags;
 	LIST_HEAD(resources);
+	resource_size_t offset[2] = {0};
+	resource_size_t membar2_offset = 0x2000, busn_start = 0;
+
+	/*
+	 * Shadow registers may exist in certain VMD device ids which allow
+	 * guests to correctly assign host physical addresses to the root ports
+	 * and child devices. These registers will either return the host value
+	 * or 0, depending on an enable bit in the VMD device.
+	 */
+	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
+		u32 vmlock;
+		int ret;
+
+		membar2_offset = 0x2018;
+		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
+		if (ret || vmlock == ~0)
+			return -ENODEV;
+
+		if (MB2_SHADOW_EN(vmlock)) {
+			void __iomem *membar2;
+
+			membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
+			if (!membar2)
+				return -ENOMEM;
+			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
+						readq(membar2 + 0x2008);
+			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
+						readq(membar2 + 0x2010);
+			pci_iounmap(vmd->dev, membar2);
+		}
+	}
+
+	/*
+	 * Certain VMD devices may have a root port configuration option which
+	 * limits the bus range to between 0-127 or 128-255
+	 */
+	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
+		u32 vmcap, vmconfig;
+
+		pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
+		pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
+		if (BUS_RESTRICT_CAP(vmcap) &&
+		    (BUS_RESTRICT_CFG(vmconfig) == 0x1))
+			busn_start = 128;
+	}
 
 	res = &vmd->dev->resource[VMD_CFGBAR];
 	vmd->resources[0] = (struct resource) {
 		.name  = "VMD CFGBAR",
-		.start = 0,
-		.end   = (resource_size(res) >> 20) - 1,
+		.start = busn_start,
+		.end   = busn_start + (resource_size(res) >> 20) - 1,
 		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
 	};
 
@@ -600,7 +667,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
 		flags &= ~IORESOURCE_MEM_64;
 	vmd->resources[2] = (struct resource) {
 		.name  = "VMD MEMBAR2",
-		.start = res->start + 0x2000,
+		.start = res->start + membar2_offset,
 		.end   = res->end,
 		.flags = flags,
 		.parent = res,
@@ -624,10 +691,11 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
 		return -ENODEV;
 
 	pci_add_resource(&resources, &vmd->resources[0]);
-	pci_add_resource(&resources, &vmd->resources[1]);
-	pci_add_resource(&resources, &vmd->resources[2]);
-	vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd,
-				       &resources);
+	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
+	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
+
+	vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
+				       sd, &resources);
 	if (!vmd->bus) {
 		pci_free_resource_list(&resources);
 		irq_domain_remove(vmd->irq_domain);
@@ -713,7 +781,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
 	spin_lock_init(&vmd->cfg_lock);
 	pci_set_drvdata(dev, vmd);
-	err = vmd_enable_domain(vmd);
+	err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
 	if (err)
 		return err;
 
@@ -778,7 +846,10 @@ static int vmd_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
 
 static const struct pci_device_id vmd_ids[] = {
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
+		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
+				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
 	{0,}
 };
 MODULE_DEVICE_TABLE(pci, vmd_ids);
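The VMCAP/VMCONFIG decode added above selects where a bus-restricted VMD domain's bus range starts. A stand-alone sketch with made-up register values (the config-space reads are replaced by constants) showing the same decision:

#include <stdio.h>
#include <stdint.h>

#define BUS_RESTRICT_CAP(vmcap)		((vmcap) & 0x1)
#define BUS_RESTRICT_CFG(vmcfg)		(((vmcfg) >> 8) & 0x3)

int main(void)
{
	/* Hypothetical register contents read from VMD config space. */
	uint32_t vmcap = 0x00000001;	/* restriction capability present */
	uint32_t vmconfig = 0x00000100;	/* restriction config field == 0x1 */
	unsigned int busn_start = 0;

	if (BUS_RESTRICT_CAP(vmcap) && BUS_RESTRICT_CFG(vmconfig) == 0x1)
		busn_start = 128;	/* domain uses buses 128-255 */

	printf("VMD root bus starts at %u\n", busn_start);
	return 0;
}

With BUS_RESTRICT_CFG equal to 0x1 the domain is limited to buses 128-255, matching the busn_start handling added to vmd_enable_domain().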
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index a8f21d051e0c..e9f78eb390d2 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -104,14 +104,11 @@ config HOTPLUG_PCI_CPCI_GENERIC
 	  When in doubt, say N.
 
 config HOTPLUG_PCI_SHPC
-	tristate "SHPC PCI Hotplug driver"
+	bool "SHPC PCI Hotplug driver"
 	help
 	  Say Y here if you have a motherboard with a SHPC PCI Hotplug
 	  controller.
 
-	  To compile this driver as a module, choose M here: the
-	  module will be called shpchp.
-
 	  When in doubt, say N.
 
 config HOTPLUG_PCI_POWERNV
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index c9816166978e..3979f89b250a 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -63,22 +63,17 @@ static acpi_status acpi_run_oshp(acpi_handle handle)
 /**
  * acpi_get_hp_hw_control_from_firmware
  * @dev: the pci_dev of the bridge that has a hotplug controller
- * @flags: requested control bits for _OSC
  *
  * Attempt to take hotplug control from firmware.
  */
-int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
 {
+	const struct pci_host_bridge *host;
+	const struct acpi_pci_root *root;
 	acpi_status status;
 	acpi_handle chandle, handle;
 	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
 
-	flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
-	if (!flags) {
-		err("Invalid flags %u specified!\n", flags);
-		return -EINVAL;
-	}
-
 	/*
 	 * Per PCI firmware specification, we should run the ACPI _OSC
 	 * method to get control of hotplug hardware before using it. If
@@ -88,25 +83,20 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
 	 * OSHP within the scope of the hotplug controller and its parents,
 	 * up to the host bridge under which this controller exists.
 	 */
-	handle = acpi_find_root_bridge_handle(pdev);
-	if (handle) {
-		acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
-		dbg("Trying to get hotplug control for %s\n",
-				(char *)string.pointer);
-		status = acpi_pci_osc_control_set(handle, &flags, flags);
-		if (ACPI_SUCCESS(status))
-			goto got_one;
-		if (status == AE_SUPPORT)
-			goto no_control;
-		kfree(string.pointer);
-		string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
-	}
+	if (shpchp_is_native(pdev))
+		return 0;
+
+	/* If _OSC exists, we should not evaluate OSHP */
+	host = pci_find_host_bridge(pdev->bus);
+	root = acpi_pci_find_root(ACPI_HANDLE(&host->dev));
+	if (root->osc_support_set)
+		goto no_control;
 
 	handle = ACPI_HANDLE(&pdev->dev);
 	if (!handle) {
 		/*
 		 * This hotplug controller was not listed in the ACPI name
-		 * space at all. Try to get acpi handle of parent pci bus.
+		 * space at all. Try to get ACPI handle of parent PCI bus.
 		 */
 		struct pci_bus *pbus;
 		for (pbus = pdev->bus; pbus; pbus = pbus->parent) {
@@ -118,8 +108,8 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
 
 	while (handle) {
 		acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
-		dbg("Trying to get hotplug control for %s\n",
-		    (char *)string.pointer);
+		pci_info(pdev, "Requesting control of SHPC hotplug via OSHP (%s)\n",
+			 (char *)string.pointer);
 		status = acpi_run_oshp(handle);
 		if (ACPI_SUCCESS(status))
 			goto got_one;
@@ -131,13 +121,12 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
 			break;
 	}
 no_control:
-	dbg("Cannot get control of hotplug hardware for pci %s\n",
-	    pci_name(pdev));
+	pci_info(pdev, "Cannot get control of SHPC hotplug\n");
 	kfree(string.pointer);
 	return -ENODEV;
 got_one:
-	dbg("Gained control for hotplug HW for pci %s (%s)\n",
-	    pci_name(pdev), (char *)string.pointer);
+	pci_info(pdev, "Gained control of SHPC hotplug (%s)\n",
+		 (char *)string.pointer);
 	kfree(string.pointer);
 	return 0;
 }
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index b45b375c0e6c..3a17b290df5d 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -287,11 +287,12 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
 	/*
 	 * Expose slots to user space for functions that have _EJ0 or _RMV or
 	 * are located in dock stations.  Do not expose them for devices handled
-	 * by the native PCIe hotplug (PCIeHP), becuase that code is supposed to
-	 * expose slots to user space in those cases.
+	 * by the native PCIe hotplug (PCIeHP) or standard PCI hotplug
+	 * (SHPCHP), because that code is supposed to expose slots to user
+	 * space in those cases.
 	 */
 	if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
-	    && !(pdev && pdev->is_hotplug_bridge && pciehp_is_native(pdev))) {
+	    && !(pdev && hotplug_is_native(pdev))) {
 		unsigned long long sun;
 		int retval;
 
@@ -430,6 +431,29 @@ static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
 	return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0));
 }
 
+static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
+{
+	struct pci_bus *bus = bridge->subordinate;
+	struct pci_dev *dev;
+	int max;
+
+	if (!bus)
+		return;
+
+	max = bus->busn_res.start;
+	/* Scan already configured non-hotplug bridges */
+	for_each_pci_bridge(dev, bus) {
+		if (!hotplug_is_native(dev))
+			max = pci_scan_bridge(bus, dev, max, 0);
+	}
+
+	/* Scan non-hotplug bridges that need to be reconfigured */
+	for_each_pci_bridge(dev, bus) {
+		if (!hotplug_is_native(dev))
+			max = pci_scan_bridge(bus, dev, max, 1);
+	}
+}
+
 /**
  * enable_slot - enable, configure a slot
  * @slot: slot to be enabled
@@ -442,25 +466,42 @@ static void enable_slot(struct acpiphp_slot *slot)
 	struct pci_dev *dev;
 	struct pci_bus *bus = slot->bus;
 	struct acpiphp_func *func;
-	int max, pass;
-	LIST_HEAD(add_list);
 
-	acpiphp_rescan_slot(slot);
-	max = acpiphp_max_busnr(bus);
-	for (pass = 0; pass < 2; pass++) {
+	if (bus->self && hotplug_is_native(bus->self)) {
+		/*
+		 * If native hotplug is used, it will take care of hotplug
+		 * slot management and resource allocation for hotplug
+		 * bridges. However, ACPI hotplug may still be used for
+		 * non-hotplug bridges to bring in additional devices such
+		 * as a Thunderbolt host controller.
+		 */
 		for_each_pci_bridge(dev, bus) {
-			if (PCI_SLOT(dev->devfn) != slot->device)
-				continue;
-
-			max = pci_scan_bridge(bus, dev, max, pass);
-			if (pass && dev->subordinate) {
-				check_hotplug_bridge(slot, dev);
-				pcibios_resource_survey_bus(dev->subordinate);
-				__pci_bus_size_bridges(dev->subordinate, &add_list);
+			if (PCI_SLOT(dev->devfn) == slot->device)
+				acpiphp_native_scan_bridge(dev);
+		}
+		pci_assign_unassigned_bridge_resources(bus->self);
+	} else {
+		LIST_HEAD(add_list);
+		int max, pass;
+
+		acpiphp_rescan_slot(slot);
+		max = acpiphp_max_busnr(bus);
+		for (pass = 0; pass < 2; pass++) {
+			for_each_pci_bridge(dev, bus) {
+				if (PCI_SLOT(dev->devfn) != slot->device)
+					continue;
+
+				max = pci_scan_bridge(bus, dev, max, pass);
+				if (pass && dev->subordinate) {
+					check_hotplug_bridge(slot, dev);
+					pcibios_resource_survey_bus(dev->subordinate);
+					__pci_bus_size_bridges(dev->subordinate,
+							       &add_list);
+				}
 			}
 		}
+		__pci_bus_assign_resources(bus, &add_list, NULL);
 	}
-	__pci_bus_assign_resources(bus, &add_list, NULL);
 
 	acpiphp_sanitize_bus(bus);
 	pcie_bus_configure_settings(bus);
@@ -481,7 +522,7 @@ static void enable_slot(struct acpiphp_slot *slot)
 		if (!dev) {
 			/* Do not set SLOT_ENABLED flag if some funcs
 			   are not added. */
-			slot->flags &= (~SLOT_ENABLED);
+			slot->flags &= ~SLOT_ENABLED;
 			continue;
 		}
 	}
@@ -510,7 +551,7 @@ static void disable_slot(struct acpiphp_slot *slot)
 	list_for_each_entry(func, &slot->funcs, sibling)
 		acpi_bus_trim(func_to_acpi_device(func));
 
-	slot->flags &= (~SLOT_ENABLED);
+	slot->flags &= ~SLOT_ENABLED;
 }
 
 static bool slot_no_hotplug(struct acpiphp_slot *slot)
@@ -608,6 +649,11 @@ static void trim_stale_devices(struct pci_dev *dev)
 		alive = pci_device_is_present(dev);
 
 	if (!alive) {
+		pci_dev_set_disconnected(dev, NULL);
+		if (pci_has_subordinate(dev))
+			pci_walk_bus(dev->subordinate, pci_dev_set_disconnected,
+				     NULL);
+
 		pci_stop_and_remove_bus_device(dev);
 		if (adev)
 			acpi_bus_trim(adev);
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index b81ca3fa0e84..1869b0411ce0 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -379,7 +379,7 @@ static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 *value)
 
 static int get_max_bus_speed(struct slot *slot)
 {
-	int rc;
+	int rc = 0;
 	u8 mode = 0;
 	enum pci_bus_speed speed;
 	struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 88e917c9120f..5f892065585e 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -121,7 +121,7 @@ struct controller *pcie_init(struct pcie_device *dev);
 int pcie_init_notification(struct controller *ctrl);
 int pciehp_enable_slot(struct slot *p_slot);
 int pciehp_disable_slot(struct slot *p_slot);
-void pcie_enable_notification(struct controller *ctrl);
+void pcie_reenable_notification(struct controller *ctrl);
 int pciehp_power_on_slot(struct slot *slot);
 void pciehp_power_off_slot(struct slot *slot);
 void pciehp_get_power_status(struct slot *slot, u8 *status);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 332b723ff9e6..44a6a63802d5 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -283,7 +283,7 @@ static int pciehp_resume(struct pcie_device *dev)
 	ctrl = get_service_data(dev);
 
 	/* reinitialize the chipset's event detection logic */
-	pcie_enable_notification(ctrl);
+	pcie_reenable_notification(ctrl);
 
 	slot = ctrl->slot;
 
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 18a42f8f5dc5..718b6073afad 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -10,7 +10,6 @@
  * All rights reserved.
  *
  * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
- *
  */
 
 #include <linux/kernel.h>
@@ -147,25 +146,22 @@ static void pcie_wait_cmd(struct controller *ctrl)
 	else
 		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
 
-	/*
-	 * Controllers with errata like Intel CF118 don't generate
-	 * completion notifications unless the power/indicator/interlock
-	 * control bits are changed.  On such controllers, we'll emit this
-	 * timeout message when we wait for completion of commands that
-	 * don't change those bits, e.g., commands that merely enable
-	 * interrupts.
-	 */
 	if (!rc)
 		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
 			  ctrl->slot_ctrl,
 			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
 }
 
+#define CC_ERRATUM_MASK		(PCI_EXP_SLTCTL_PCC |	\
+				 PCI_EXP_SLTCTL_PIC |	\
+				 PCI_EXP_SLTCTL_AIC |	\
+				 PCI_EXP_SLTCTL_EIC)
+
 static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
 			      u16 mask, bool wait)
 {
 	struct pci_dev *pdev = ctrl_dev(ctrl);
-	u16 slot_ctrl;
+	u16 slot_ctrl_orig, slot_ctrl;
 
 	mutex_lock(&ctrl->ctrl_lock);
 
@@ -180,6 +176,7 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
 		goto out;
 	}
 
+	slot_ctrl_orig = slot_ctrl;
 	slot_ctrl &= ~mask;
 	slot_ctrl |= (cmd & mask);
 	ctrl->cmd_busy = 1;
@@ -189,6 +186,17 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
 	ctrl->slot_ctrl = slot_ctrl;
 
 	/*
+	 * Controllers with the Intel CF118 and similar errata advertise
+	 * Command Completed support, but they only set Command Completed
+	 * if we change the "Control" bits for power, power indicator,
+	 * attention indicator, or interlock.  If we only change the
+	 * "Enable" bits, they never set the Command Completed bit.
+	 */
+	if (pdev->broken_cmd_compl &&
+	    (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
+		ctrl->cmd_busy = 0;
+
+	/*
 	 * Optionally wait for the hardware to be ready for a new command,
 	 * indicating completion of the above issued command.
 	 */
@@ -231,25 +239,11 @@ bool pciehp_check_link_active(struct controller *ctrl)
 	return ret;
 }
 
-static void __pcie_wait_link_active(struct controller *ctrl, bool active)
-{
-	int timeout = 1000;
-
-	if (pciehp_check_link_active(ctrl) == active)
-		return;
-	while (timeout > 0) {
-		msleep(10);
-		timeout -= 10;
-		if (pciehp_check_link_active(ctrl) == active)
-			return;
-	}
-	ctrl_dbg(ctrl, "Data Link Layer Link Active not %s in 1000 msec\n",
-			active ? "set" : "cleared");
-}
-
 static void pcie_wait_link_active(struct controller *ctrl)
 {
-	__pcie_wait_link_active(ctrl, true);
+	struct pci_dev *pdev = ctrl_dev(ctrl);
+
+	pcie_wait_for_link(pdev, true);
 }
 
 static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
@@ -659,7 +653,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
 	return handled;
 }
 
-void pcie_enable_notification(struct controller *ctrl)
+static void pcie_enable_notification(struct controller *ctrl)
 {
 	u16 cmd, mask;
 
@@ -697,6 +691,17 @@ void pcie_enable_notification(struct controller *ctrl)
 		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
 }
 
+void pcie_reenable_notification(struct controller *ctrl)
+{
+	/*
+	 * Clear both Presence and Data Link Layer Changed to make sure
+	 * those events still fire after we have re-enabled them.
+	 */
+	pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
+				   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
+	pcie_enable_notification(ctrl);
+}
+
 static void pcie_disable_notification(struct controller *ctrl)
 {
 	u16 mask;
@@ -861,7 +866,7 @@ struct controller *pcie_init(struct pcie_device *dev)
 		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
 		PCI_EXP_SLTSTA_DLLSC);
 
-	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c\n",
+	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
 		(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
 		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
 		FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
@@ -872,7 +877,8 @@ struct controller *pcie_init(struct pcie_device *dev)
 		FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
 		FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
 		FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
-		FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC));
+		FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
+		pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
 
 	if (pcie_init_slot(ctrl))
 		goto abort_ctrl;
@@ -891,3 +897,21 @@ void pciehp_release_ctrl(struct controller *ctrl)
 	pcie_cleanup_slot(ctrl);
 	kfree(ctrl);
 }
+
+static void quirk_cmd_compl(struct pci_dev *pdev)
+{
+	u32 slot_cap;
+
+	if (pci_is_pcie(pdev)) {
+		pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
+		if (slot_cap & PCI_EXP_SLTCAP_HPC &&
+		    !(slot_cap & PCI_EXP_SLTCAP_NCCS))
+			pdev->broken_cmd_compl = 1;
+	}
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
+			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
+			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
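/*
 * Editor's note (illustrative, not part of the patch): quirk_cmd_compl()
 * marks hotplug bridges that advertise Command Completed support (NCCS
 * clear) but are known not to signal completion for "Enable"-only writes.
 * The resulting broken_cmd_compl flag is consumed in pcie_do_write_cmd()
 * above, where cmd_busy is cleared when none of the CC_ERRATUM_MASK bits
 * change, so pcie_wait_cmd() does not wait for a completion that will
 * never arrive.
 */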
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index d44100687dfe..6c2e8d7307c6 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -220,12 +220,16 @@ static int pnv_php_populate_changeset(struct of_changeset *ocs,
 
 	for_each_child_of_node(dn, child) {
 		ret = of_changeset_attach_node(ocs, child);
-		if (ret)
+		if (ret) {
+			of_node_put(child);
 			break;
+		}
 
 		ret = pnv_php_populate_changeset(ocs, child);
-		if (ret)
+		if (ret) {
+			of_node_put(child);
 			break;
+		}
 	}
 
 	return ret;
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index c55730b61c9a..516e4835019c 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -105,7 +105,6 @@ struct controller {
 };
 
 /* Define AMD SHPC ID  */
-#define PCI_DEVICE_ID_AMD_GOLAM_7450	0x7450
 #define PCI_DEVICE_ID_AMD_POGO_7458	0x7458
 
 /* AMD PCI-X bridge registers */
@@ -173,17 +172,6 @@ static inline const char *slot_name(struct slot *slot)
 	return hotplug_slot_name(slot->hotplug_slot);
 }
 
-#ifdef CONFIG_ACPI
-#include <linux/pci-acpi.h>
-static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
-{
-	u32 flags = OSC_PCI_SHPC_NATIVE_HP_CONTROL;
-	return acpi_get_hp_hw_control_from_firmware(dev, flags);
-}
-#else
-#define get_hp_hw_control_from_firmware(dev) (0)
-#endif
-
 struct ctrl_reg {
 	volatile u32 base_offset;
 	volatile u32 slot_avail1;
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 1f0f96908b5a..e91be287f292 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -270,24 +270,12 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
 	return 0;
 }
 
-static int is_shpc_capable(struct pci_dev *dev)
-{
-	if (dev->vendor == PCI_VENDOR_ID_AMD &&
-	    dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
-		return 1;
-	if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
-		return 0;
-	if (get_hp_hw_control_from_firmware(dev))
-		return 0;
-	return 1;
-}
-
 static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	int rc;
 	struct controller *ctrl;
 
-	if (!is_shpc_capable(pdev))
+	if (acpi_get_hp_hw_control_from_firmware(pdev))
 		return -ENODEV;
 
 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index bedda5bda910..1047b56e5730 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -585,13 +585,13 @@ static int shpchp_enable_slot (struct slot *p_slot)
 	ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
 	p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
 
-	if (((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) ||
-	    (p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458))
+	if ((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD &&
+	     p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)
 	     && p_slot->ctrl->num_slots == 1) {
-		/* handle amd pogo errata; this must be done before enable  */
+		/* handle AMD POGO errata; this must be done before enable  */
 		amd_pogo_errata_save_misc_reg(p_slot);
 		retval = board_added(p_slot);
-		/* handle amd pogo errata; this must be done after enable  */
+		/* handle AMD POGO errata; this must be done after enable  */
 		amd_pogo_errata_restore_misc_reg(p_slot);
 	} else
 		retval = board_added(p_slot);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 8adf4a64f291..d0d73dbbd5ca 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -469,6 +469,7 @@ found:
 	iov->nres = nres;
 	iov->ctrl = ctrl;
 	iov->total_VFs = total;
+	iov->driver_max_VFs = total;
 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
 	iov->pgsz = pgsz;
 	iov->self = dev;
@@ -827,9 +828,42 @@ int pci_sriov_get_totalvfs(struct pci_dev *dev)
 	if (!dev->is_physfn)
 		return 0;
 
-	if (dev->sriov->driver_max_VFs)
-		return dev->sriov->driver_max_VFs;
-
-	return dev->sriov->total_VFs;
+	return dev->sriov->driver_max_VFs;
 }
 EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
+
+/**
+ * pci_sriov_configure_simple - helper to configure SR-IOV
+ * @dev: the PCI device
+ * @nr_virtfn: number of virtual functions to enable, 0 to disable
+ *
+ * Enable or disable SR-IOV for devices that don't require any PF setup
+ * before enabling SR-IOV.  Return value is negative on error, or number of
+ * VFs allocated on success.
+ */
+int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
+{
+	int rc;
+
+	might_sleep();
+
+	if (!dev->is_physfn)
+		return -ENODEV;
+
+	if (pci_vfs_assigned(dev)) {
+		pci_warn(dev, "Cannot modify SR-IOV while VFs are assigned\n");
+		return -EPERM;
+	}
+
+	if (nr_virtfn == 0) {
+		sriov_disable(dev);
+		return 0;
+	}
+
+	rc = sriov_enable(dev, nr_virtfn);
+	if (rc < 0)
+		return rc;
+
+	return nr_virtfn;
+}
+EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
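/*
 * Editor's sketch (not part of the patch): a PF driver that needs no setup
 * before enabling VFs can wire pci_sriov_configure_simple() directly into
 * its pci_driver, as pci-pf-stub does later in this series.  The driver
 * name and device ID below are placeholders.
 */
#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pf_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical PF device */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, example_pf_ids);

static int example_pf_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	return 0;	/* no PF setup required before SR-IOV is enabled */
}

static struct pci_driver example_pf_driver = {
	.name			= "example-pf",
	.id_table		= example_pf_ids,
	.probe			= example_pf_probe,
	.sriov_configure	= pci_sriov_configure_simple,
};
module_pci_driver(example_pf_driver);
MODULE_LICENSE("GPL");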
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index a28355c273ae..d088c9147f10 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -244,8 +244,9 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
 
 #if defined(CONFIG_OF_ADDRESS)
 /**
- * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT
- * @dev: device node of the host bridge having the range property
+ * devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
+ *                                           host bridge resources from DT
+ * @dev: host bridge device
  * @busno: bus number associated with the bridge root bus
  * @bus_max: maximum number of buses for this bridge
  * @resources: list where the range of resources will be added after DT parsing
@@ -253,8 +254,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
  * address for the start of the I/O range. Can be NULL if the caller doesn't
  * expect I/O ranges to be present in the device tree.
  *
- * It is the caller's job to free the @resources list.
- *
  * This function will parse the "ranges" property of a PCI host bridge device
  * node and setup the resource mapping based on its content. It is expected
  * that the property conforms with the Power ePAPR document.
@@ -262,11 +261,11 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
  * It returns zero if the range parsing has been successful or a standard error
  * value if it failed.
  */
-int of_pci_get_host_bridge_resources(struct device_node *dev,
+int devm_of_pci_get_host_bridge_resources(struct device *dev,
 			unsigned char busno, unsigned char bus_max,
 			struct list_head *resources, resource_size_t *io_base)
 {
-	struct resource_entry *window;
+	struct device_node *dev_node = dev->of_node;
 	struct resource *res;
 	struct resource *bus_range;
 	struct of_pci_range range;
@@ -277,19 +276,19 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
 	if (io_base)
 		*io_base = (resource_size_t)OF_BAD_ADDR;
 
-	bus_range = kzalloc(sizeof(*bus_range), GFP_KERNEL);
+	bus_range = devm_kzalloc(dev, sizeof(*bus_range), GFP_KERNEL);
 	if (!bus_range)
 		return -ENOMEM;
 
-	pr_info("host bridge %pOF ranges:\n", dev);
+	dev_info(dev, "host bridge %pOF ranges:\n", dev_node);
 
-	err = of_pci_parse_bus_range(dev, bus_range);
+	err = of_pci_parse_bus_range(dev_node, bus_range);
 	if (err) {
 		bus_range->start = busno;
 		bus_range->end = bus_max;
 		bus_range->flags = IORESOURCE_BUS;
-		pr_info("  No bus range found for %pOF, using %pR\n",
-			dev, bus_range);
+		dev_info(dev, "  No bus range found for %pOF, using %pR\n",
+			 dev_node, bus_range);
 	} else {
 		if (bus_range->end > bus_range->start + bus_max)
 			bus_range->end = bus_range->start + bus_max;
@@ -297,11 +296,11 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
 	pci_add_resource(resources, bus_range);
 
 	/* Check for ranges property */
-	err = of_pci_range_parser_init(&parser, dev);
+	err = of_pci_range_parser_init(&parser, dev_node);
 	if (err)
-		goto parse_failed;
+		goto failed;
 
-	pr_debug("Parsing ranges property...\n");
+	dev_dbg(dev, "Parsing ranges property...\n");
 	for_each_of_pci_range(&parser, &range) {
 		/* Read next ranges element */
 		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
@@ -310,9 +309,9 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
 			snprintf(range_type, 4, "MEM");
 		else
 			snprintf(range_type, 4, "err");
-		pr_info("  %s %#010llx..%#010llx -> %#010llx\n", range_type,
-			range.cpu_addr, range.cpu_addr + range.size - 1,
-			range.pci_addr);
+		dev_info(dev, "  %s %#010llx..%#010llx -> %#010llx\n",
+			 range_type, range.cpu_addr,
+			 range.cpu_addr + range.size - 1, range.pci_addr);
 
 		/*
 		 * If we failed translation or got a zero-sized region
@@ -321,28 +320,28 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
 		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
 			continue;
 
-		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+		res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
 		if (!res) {
 			err = -ENOMEM;
-			goto parse_failed;
+			goto failed;
 		}
 
-		err = of_pci_range_to_resource(&range, dev, res);
+		err = of_pci_range_to_resource(&range, dev_node, res);
 		if (err) {
-			kfree(res);
+			devm_kfree(dev, res);
 			continue;
 		}
 
 		if (resource_type(res) == IORESOURCE_IO) {
 			if (!io_base) {
-				pr_err("I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
-					dev);
+				dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
+					dev_node);
 				err = -EINVAL;
-				goto conversion_failed;
+				goto failed;
 			}
 			if (*io_base != (resource_size_t)OF_BAD_ADDR)
-				pr_warn("More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
-					dev);
+				dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
+					 dev_node);
 			*io_base = range.cpu_addr;
 		}
 
@@ -351,15 +350,11 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
 
 	return 0;
 
-conversion_failed:
-	kfree(res);
-parse_failed:
-	resource_list_for_each_entry(window, resources)
-		kfree(window->res);
+failed:
 	pci_free_resource_list(resources);
 	return err;
 }
-EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
+EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
 #endif /* CONFIG_OF_ADDRESS */
 
 /**
@@ -599,12 +594,12 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
 				    struct resource **bus_range)
 {
 	int err, res_valid = 0;
-	struct device_node *np = dev->of_node;
 	resource_size_t iobase;
 	struct resource_entry *win, *tmp;
 
 	INIT_LIST_HEAD(resources);
-	err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources,
+						    &iobase);
 	if (err)
 		return err;
 
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 1abdbf267c19..65113b6eed14 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -370,26 +370,57 @@ EXPORT_SYMBOL_GPL(pci_get_hp_params);
 
 /**
  * pciehp_is_native - Check whether a hotplug port is handled by the OS
- * @pdev: Hotplug port to check
+ * @bridge: Hotplug port to check
  *
- * Walk up from @pdev to the host bridge, obtain its cached _OSC Control Field
- * and return the value of the "PCI Express Native Hot Plug control" bit.
- * On failure to obtain the _OSC Control Field return %false.
+ * Returns true if the given @bridge is handled by the native PCIe hotplug
+ * driver.
  */
-bool pciehp_is_native(struct pci_dev *pdev)
+bool pciehp_is_native(struct pci_dev *bridge)
 {
-	struct acpi_pci_root *root;
-	acpi_handle handle;
+	const struct pci_host_bridge *host;
+	u32 slot_cap;
 
-	handle = acpi_find_root_bridge_handle(pdev);
-	if (!handle)
+	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
 		return false;
 
-	root = acpi_pci_find_root(handle);
-	if (!root)
+	pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
+	if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
+		return false;
+
+	if (pcie_ports_native)
+		return true;
+
+	host = pci_find_host_bridge(bridge->bus);
+	return host->native_pcie_hotplug;
+}
+
+/**
+ * shpchp_is_native - Check whether a hotplug port is handled by the OS
+ * @bridge: Hotplug port to check
+ *
+ * Returns true if the given @bridge is handled by the native SHPC hotplug
+ * driver.
+ */
+bool shpchp_is_native(struct pci_dev *bridge)
+{
+	const struct pci_host_bridge *host;
+
+	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
+		return false;
+
+	/*
+	 * AMD GOLAM chips are assumed to support SHPC even though they do
+	 * not advertise the SHPC capability.
+	 */
+	if (bridge->vendor == PCI_VENDOR_ID_AMD &&
+	    bridge->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
+		return true;
+
+	if (!pci_find_capability(bridge, PCI_CAP_ID_SHPC))
 		return false;
 
-	return root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
+	host = pci_find_host_bridge(bridge->bus);
+	return host->native_shpc_hotplug;
 }
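/*
 * Editor's sketch (not part of the patch): acpiphp (see the acpiphp_glue.c
 * hunks above) decides whether to expose a slot by combining these checks;
 * hotplug_is_native() is assumed here to be a helper from the same series
 * that returns true when either native PCIe hotplug or SHPC hotplug owns
 * the bridge.
 */
static bool example_skip_acpi_slot(struct pci_dev *bridge)
{
	/* Let pciehp/shpchp manage the slot; acpiphp should stay away. */
	return pciehp_is_native(bridge) || shpchp_is_native(bridge);
}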
 
 /**
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 18ba62c76480..c125d53033c6 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1539,7 +1539,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
 	return 0;
 }
 
-#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
 /**
  * pci_uevent_ers - emit a uevent during recovery path of PCI device
  * @pdev: PCI device undergoing error recovery
diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c
new file mode 100644
index 000000000000..9795649fc6f9
--- /dev/null
+++ b/drivers/pci/pci-pf-stub.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* pci-pf-stub - simple stub driver for PCI SR-IOV PF device
+ *
+ * This driver is meant to act as a "whitelist" for devices that provide
+ * SR-IOV functionality while at the same time not actually needing a
+ * driver of their own.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+/**
+ * pci_pf_stub_whitelist - Whitelist of devices to bind pci-pf-stub onto
+ *
+ * This table provides the list of IDs this driver is supposed to bind
+ * onto.  You could think of it as a list of "quirked" devices for which
+ * we add SR-IOV support here because they have no other driver to run
+ * under.
+ */
+static const struct pci_device_id pci_pf_stub_whitelist[] = {
+	{ PCI_VDEVICE(AMAZON, 0x0053) },
+	/* required last entry */
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, pci_pf_stub_whitelist);
+
+static int pci_pf_stub_probe(struct pci_dev *dev,
+			     const struct pci_device_id *id)
+{
+	pci_info(dev, "claimed by pci-pf-stub\n");
+	return 0;
+}
+
+static struct pci_driver pf_stub_driver = {
+	.name			= "pci-pf-stub",
+	.id_table		= pci_pf_stub_whitelist,
+	.probe			= pci_pf_stub_probe,
+	.sriov_configure	= pci_sriov_configure_simple,
+};
+
+static int __init pci_pf_stub_init(void)
+{
+	return pci_register_driver(&pf_stub_driver);
+}
+
+static void __exit pci_pf_stub_exit(void)
+{
+	pci_unregister_driver(&pf_stub_driver);
+}
+
+module_init(pci_pf_stub_init);
+module_exit(pci_pf_stub_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 366d93af051d..788a200fb2dc 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -288,13 +288,16 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (!val) {
-		if (pci_is_enabled(pdev))
-			pci_disable_device(pdev);
-		else
-			result = -EIO;
-	} else
+	device_lock(dev);
+	if (dev->driver)
+		result = -EBUSY;
+	else if (val)
 		result = pci_enable_device(pdev);
+	else if (pci_is_enabled(pdev))
+		pci_disable_device(pdev);
+	else
+		result = -EIO;
+	device_unlock(dev);
 
 	return result < 0 ? result : count;
 }
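/*
 * Editor's note (illustrative, not part of the patch): with this change,
 * writing to /sys/bus/pci/devices/<bdf>/enable while a driver is bound
 * fails with -EBUSY instead of enabling or disabling the device underneath
 * the driver; the sysfs knob is only honoured for unbound devices.
 */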
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e90cf5c32e14..97acba712e4e 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -112,6 +112,14 @@ unsigned int pcibios_max_latency = 255;
 /* If set, the PCIe ARI capability will not be used. */
 static bool pcie_ari_disabled;
 
+/* If set, the PCIe ATS capability will not be used. */
+static bool pcie_ats_disabled;
+
+bool pci_ats_disabled(void)
+{
+	return pcie_ats_disabled;
+}
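/*
 * Editor's sketch (not part of the patch): booting with "pci=noats" sets
 * pcie_ats_disabled (see pci_setup() later in this file), and ATS setup
 * code is expected to honour it roughly as below; the function name is
 * hypothetical.
 */
static int example_enable_ats(struct pci_dev *dev)
{
	if (pci_ats_disabled())
		return -EINVAL;	/* ATS disabled on the kernel command line */
	/* ... proceed with ATS capability setup ... */
	return 0;
}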
+
 /* Disable bridge_d3 for all PCIe ports */
 static bool pci_bridge_d3_disable;
 /* Force bridge_d3 for all PCIe ports */
@@ -4153,6 +4161,35 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
 
 	return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
 }
+/**
+ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+ *
+ * Use this to wait until the link becomes active or inactive.
+ */
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+{
+	int timeout = 1000;
+	bool ret;
+	u16 lnk_status;
+
+	for (;;) {
+		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+		ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+		if (ret == active)
+			return true;
+		if (timeout <= 0)
+			break;
+		msleep(10);
+		timeout -= 10;
+	}
+
+	pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
+		 active ? "set" : "cleared");
+
+	return false;
+}
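/*
 * Editor's sketch (not part of the patch): callers elsewhere in this series
 * use the helper in both directions, e.g. pciehp waits for the link to come
 * up after powering a slot while DPC waits for it to go down before clearing
 * the trigger; either case reduces to a single call.
 */
static void example_wait_for_hotplug_link(struct pci_dev *bridge)
{
	if (!pcie_wait_for_link(bridge, true))	/* wait up to ~1 s for DLLLA */
		pci_info(bridge, "link did not come up\n");
}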
 
 void pci_reset_secondary_bus(struct pci_dev *dev)
 {
@@ -5085,49 +5122,6 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
 EXPORT_SYMBOL(pcie_set_mps);
 
 /**
- * pcie_get_minimum_link - determine minimum link settings of a PCI device
- * @dev: PCI device to query
- * @speed: storage for minimum speed
- * @width: storage for minimum width
- *
- * This function will walk up the PCI device chain and determine the minimum
- * link width and speed of the device.
- */
-int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
-			  enum pcie_link_width *width)
-{
-	int ret;
-
-	*speed = PCI_SPEED_UNKNOWN;
-	*width = PCIE_LNK_WIDTH_UNKNOWN;
-
-	while (dev) {
-		u16 lnksta;
-		enum pci_bus_speed next_speed;
-		enum pcie_link_width next_width;
-
-		ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
-		if (ret)
-			return ret;
-
-		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
-		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
-			PCI_EXP_LNKSTA_NLW_SHIFT;
-
-		if (next_speed < *speed)
-			*speed = next_speed;
-
-		if (next_width < *width)
-			*width = next_width;
-
-		dev = dev->bus->self;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(pcie_get_minimum_link);
-
-/**
  * pcie_bandwidth_available - determine minimum link settings of a PCIe
  *			      device and its bandwidth limitation
  * @dev: PCI device to query
@@ -5717,15 +5711,14 @@ static void pci_no_domains(void)
 #endif
 }
 
-#ifdef CONFIG_PCI_DOMAINS
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
 static atomic_t __domain_nr = ATOMIC_INIT(-1);
 
-int pci_get_new_domain_nr(void)
+static int pci_get_new_domain_nr(void)
 {
 	return atomic_inc_return(&__domain_nr);
 }
 
-#ifdef CONFIG_PCI_DOMAINS_GENERIC
 static int of_pci_bus_find_domain_nr(struct device *parent)
 {
 	static int use_dt_domains = -1;
@@ -5780,7 +5773,6 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
 			       acpi_pci_bus_find_domain_nr(bus);
 }
 #endif
-#endif
 
 /**
  * pci_ext_cfg_avail - can we access extended PCI config space?
@@ -5808,6 +5800,9 @@ static int __init pci_setup(char *str)
 		if (*str && (str = pcibios_setup(str)) && *str) {
 			if (!strcmp(str, "nomsi")) {
 				pci_no_msi();
+			} else if (!strncmp(str, "noats", 5)) {
+				pr_info("PCIe: ATS is disabled\n");
+				pcie_ats_disabled = true;
 			} else if (!strcmp(str, "noaer")) {
 				pci_no_aer();
 			} else if (!strncmp(str, "realloc=", 8)) {
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 023f7cf25bff..c358e7a07f3f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -353,6 +353,11 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
 
 void pci_enable_acs(struct pci_dev *dev);
 
+/* PCI error reporting and recovery */
+void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service);
+void pcie_do_nonfatal_recovery(struct pci_dev *dev);
+
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
 #ifdef CONFIG_PCIEASPM
 void pcie_aspm_init_link_state(struct pci_dev *pdev);
 void pcie_aspm_exit_link_state(struct pci_dev *pdev);
@@ -407,4 +412,44 @@ static inline u64 pci_rebar_size_to_bytes(int size)
 	return 1ULL << (size + 20);
 }
 
+struct device_node;
+
+#ifdef CONFIG_OF
+int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
+int of_get_pci_domain_nr(struct device_node *node);
+int of_pci_get_max_link_speed(struct device_node *node);
+
+#else
+static inline int
+of_pci_parse_bus_range(struct device_node *node, struct resource *res)
+{
+	return -EINVAL;
+}
+
+static inline int
+of_get_pci_domain_nr(struct device_node *node)
+{
+	return -1;
+}
+
+static inline int
+of_pci_get_max_link_speed(struct device_node *node)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+#if defined(CONFIG_OF_ADDRESS)
+int devm_of_pci_get_host_bridge_resources(struct device *dev,
+			unsigned char busno, unsigned char bus_max,
+			struct list_head *resources, resource_size_t *io_base);
+#else
+static inline int devm_of_pci_get_host_bridge_resources(struct device *dev,
+			unsigned char busno, unsigned char bus_max,
+			struct list_head *resources, resource_size_t *io_base)
+{
+	return -EINVAL;
+}
+#endif
+
 #endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 800e1d404a45..03f4e0b3a140 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -2,7 +2,7 @@
 #
 # Makefile for PCI Express features and port driver
 
-pcieportdrv-y			:= portdrv_core.o portdrv_pci.o
+pcieportdrv-y			:= portdrv_core.o portdrv_pci.o err.o
 
 obj-$(CONFIG_PCIEPORTBUS)	+= pcieportdrv.o
 
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 779b3879b1b5..9735c19bf39c 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -94,7 +94,7 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
  */
 static void aer_enable_rootport(struct aer_rpc *rpc)
 {
-	struct pci_dev *pdev = rpc->rpd->port;
+	struct pci_dev *pdev = rpc->rpd;
 	int aer_pos;
 	u16 reg16;
 	u32 reg32;
@@ -136,7 +136,7 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
  */
 static void aer_disable_rootport(struct aer_rpc *rpc)
 {
-	struct pci_dev *pdev = rpc->rpd->port;
+	struct pci_dev *pdev = rpc->rpd;
 	u32 reg32;
 	int pos;
 
@@ -232,7 +232,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
 	/* Initialize Root lock access, e_lock, to Root Error Status Reg */
 	spin_lock_init(&rpc->e_lock);
 
-	rpc->rpd = dev;
+	rpc->rpd = dev->port;
 	INIT_WORK(&rpc->dpc_handler, aer_isr);
 	mutex_init(&rpc->rpc_mutex);
 
@@ -353,10 +353,7 @@ static void aer_error_resume(struct pci_dev *dev)
 	pos = dev->aer_cap;
 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
-	if (dev->error_state == pci_channel_io_normal)
-		status &= ~mask; /* Clear corresponding nonfatal bits */
-	else
-		status &= mask; /* Clear corresponding fatal bits */
+	status &= ~mask; /* Clear corresponding nonfatal bits */
 	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
 }
 
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 08b4584f62fe..6e0ad9a68fd9 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -58,7 +58,7 @@ struct aer_err_source {
 };
 
 struct aer_rpc {
-	struct pcie_device *rpd;	/* Root Port device */
+	struct pci_dev *rpd;		/* Root Port device */
 	struct work_struct dpc_handler;
 	struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
 	struct aer_err_info e_info;
@@ -76,36 +76,6 @@ struct aer_rpc {
 					 */
 };
 
-struct aer_broadcast_data {
-	enum pci_channel_state state;
-	enum pci_ers_result result;
-};
-
-static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
-		enum pci_ers_result new)
-{
-	if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
-		return PCI_ERS_RESULT_NO_AER_DRIVER;
-
-	if (new == PCI_ERS_RESULT_NONE)
-		return orig;
-
-	switch (orig) {
-	case PCI_ERS_RESULT_CAN_RECOVER:
-	case PCI_ERS_RESULT_RECOVERED:
-		orig = new;
-		break;
-	case PCI_ERS_RESULT_DISCONNECT:
-		if (new == PCI_ERS_RESULT_NEED_RESET)
-			orig = PCI_ERS_RESULT_NEED_RESET;
-		break;
-	default:
-		break;
-	}
-
-	return orig;
-}
-
 extern struct bus_type pcie_port_bus_type;
 void aer_isr(struct work_struct *work);
 void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 0ea5acc40323..42d4f3f32282 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -20,6 +20,7 @@
 #include <linux/slab.h>
 #include <linux/kfifo.h>
 #include "aerdrv.h"
+#include "../../pci.h"
 
 #define	PCI_EXP_AER_FLAGS	(PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
 				 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
@@ -227,329 +228,14 @@ static bool find_source_device(struct pci_dev *parent,
 	return true;
 }
 
-static int report_error_detected(struct pci_dev *dev, void *data)
-{
-	pci_ers_result_t vote;
-	const struct pci_error_handlers *err_handler;
-	struct aer_broadcast_data *result_data;
-	result_data = (struct aer_broadcast_data *) data;
-
-	device_lock(&dev->dev);
-	dev->error_state = result_data->state;
-
-	if (!dev->driver ||
-		!dev->driver->err_handler ||
-		!dev->driver->err_handler->error_detected) {
-		if (result_data->state == pci_channel_io_frozen &&
-			dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
-			/*
-			 * In case of fatal recovery, if one of down-
-			 * stream device has no driver. We might be
-			 * unable to recover because a later insmod
-			 * of a driver for this device is unaware of
-			 * its hw state.
-			 */
-			pci_printk(KERN_DEBUG, dev, "device has %s\n",
-				   dev->driver ?
-				   "no AER-aware driver" : "no driver");
-		}
-
-		/*
-		 * If there's any device in the subtree that does not
-		 * have an error_detected callback, returning
-		 * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
-		 * the subsequent mmio_enabled/slot_reset/resume
-		 * callbacks of "any" device in the subtree. All the
-		 * devices in the subtree are left in the error state
-		 * without recovery.
-		 */
-
-		if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
-			vote = PCI_ERS_RESULT_NO_AER_DRIVER;
-		else
-			vote = PCI_ERS_RESULT_NONE;
-	} else {
-		err_handler = dev->driver->err_handler;
-		vote = err_handler->error_detected(dev, result_data->state);
-		pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
-	}
-
-	result_data->result = merge_result(result_data->result, vote);
-	device_unlock(&dev->dev);
-	return 0;
-}
-
-static int report_mmio_enabled(struct pci_dev *dev, void *data)
-{
-	pci_ers_result_t vote;
-	const struct pci_error_handlers *err_handler;
-	struct aer_broadcast_data *result_data;
-	result_data = (struct aer_broadcast_data *) data;
-
-	device_lock(&dev->dev);
-	if (!dev->driver ||
-		!dev->driver->err_handler ||
-		!dev->driver->err_handler->mmio_enabled)
-		goto out;
-
-	err_handler = dev->driver->err_handler;
-	vote = err_handler->mmio_enabled(dev);
-	result_data->result = merge_result(result_data->result, vote);
-out:
-	device_unlock(&dev->dev);
-	return 0;
-}
-
-static int report_slot_reset(struct pci_dev *dev, void *data)
-{
-	pci_ers_result_t vote;
-	const struct pci_error_handlers *err_handler;
-	struct aer_broadcast_data *result_data;
-	result_data = (struct aer_broadcast_data *) data;
-
-	device_lock(&dev->dev);
-	if (!dev->driver ||
-		!dev->driver->err_handler ||
-		!dev->driver->err_handler->slot_reset)
-		goto out;
-
-	err_handler = dev->driver->err_handler;
-	vote = err_handler->slot_reset(dev);
-	result_data->result = merge_result(result_data->result, vote);
-out:
-	device_unlock(&dev->dev);
-	return 0;
-}
-
-static int report_resume(struct pci_dev *dev, void *data)
-{
-	const struct pci_error_handlers *err_handler;
-
-	device_lock(&dev->dev);
-	dev->error_state = pci_channel_io_normal;
-
-	if (!dev->driver ||
-		!dev->driver->err_handler ||
-		!dev->driver->err_handler->resume)
-		goto out;
-
-	err_handler = dev->driver->err_handler;
-	err_handler->resume(dev);
-	pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
-out:
-	device_unlock(&dev->dev);
-	return 0;
-}
-
-/**
- * broadcast_error_message - handle message broadcast to downstream drivers
- * @dev: pointer to from where in a hierarchy message is broadcasted down
- * @state: error state
- * @error_mesg: message to print
- * @cb: callback to be broadcasted
- *
- * Invoked during error recovery process. Once being invoked, the content
- * of error severity will be broadcasted to all downstream drivers in a
- * hierarchy in question.
- */
-static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
-	enum pci_channel_state state,
-	char *error_mesg,
-	int (*cb)(struct pci_dev *, void *))
-{
-	struct aer_broadcast_data result_data;
-
-	pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
-	result_data.state = state;
-	if (cb == report_error_detected)
-		result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
-	else
-		result_data.result = PCI_ERS_RESULT_RECOVERED;
-
-	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
-		/*
-		 * If the error is reported by a bridge, we think this error
-		 * is related to the downstream link of the bridge, so we
-		 * do error recovery on all subordinates of the bridge instead
-		 * of the bridge and clear the error status of the bridge.
-		 */
-		if (cb == report_error_detected)
-			dev->error_state = state;
-		pci_walk_bus(dev->subordinate, cb, &result_data);
-		if (cb == report_resume) {
-			pci_cleanup_aer_uncorrect_error_status(dev);
-			dev->error_state = pci_channel_io_normal;
-		}
-	} else {
-		/*
-		 * If the error is reported by an end point, we think this
-		 * error is related to the upstream link of the end point.
-		 */
-		if (state == pci_channel_io_normal)
-			/*
-			 * the error is non fatal so the bus is ok, just invoke
-			 * the callback for the function that logged the error.
-			 */
-			cb(dev, &result_data);
-		else
-			pci_walk_bus(dev->bus, cb, &result_data);
-	}
-
-	return result_data.result;
-}
-
-/**
- * default_reset_link - default reset function
- * @dev: pointer to pci_dev data structure
- *
- * Invoked when performing link reset on a Downstream Port or a
- * Root Port with no aer driver.
- */
-static pci_ers_result_t default_reset_link(struct pci_dev *dev)
-{
-	pci_reset_bridge_secondary_bus(dev);
-	pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
-	return PCI_ERS_RESULT_RECOVERED;
-}
-
-static int find_aer_service_iter(struct device *device, void *data)
-{
-	struct pcie_port_service_driver *service_driver, **drv;
-
-	drv = (struct pcie_port_service_driver **) data;
-
-	if (device->bus == &pcie_port_bus_type && device->driver) {
-		service_driver = to_service_driver(device->driver);
-		if (service_driver->service == PCIE_PORT_SERVICE_AER) {
-			*drv = service_driver;
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
-{
-	struct pcie_port_service_driver *drv = NULL;
-
-	device_for_each_child(&dev->dev, &drv, find_aer_service_iter);
-
-	return drv;
-}
-
-static pci_ers_result_t reset_link(struct pci_dev *dev)
-{
-	struct pci_dev *udev;
-	pci_ers_result_t status;
-	struct pcie_port_service_driver *driver;
-
-	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
-		/* Reset this port for all subordinates */
-		udev = dev;
-	} else {
-		/* Reset the upstream component (likely downstream port) */
-		udev = dev->bus->self;
-	}
-
-	/* Use the aer driver of the component firstly */
-	driver = find_aer_service(udev);
-
-	if (driver && driver->reset_link) {
-		status = driver->reset_link(udev);
-	} else if (udev->has_secondary_link) {
-		status = default_reset_link(udev);
-	} else {
-		pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
-			pci_name(udev));
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-
-	if (status != PCI_ERS_RESULT_RECOVERED) {
-		pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
-			pci_name(udev));
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-
-	return status;
-}
-
-/**
- * do_recovery - handle nonfatal/fatal error recovery process
- * @dev: pointer to a pci_dev data structure of agent detecting an error
- * @severity: error severity type
- *
- * Invoked when an error is nonfatal/fatal. Once being invoked, broadcast
- * error detected message to all downstream drivers within a hierarchy in
- * question and return the returned code.
- */
-static void do_recovery(struct pci_dev *dev, int severity)
-{
-	pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
-	enum pci_channel_state state;
-
-	if (severity == AER_FATAL)
-		state = pci_channel_io_frozen;
-	else
-		state = pci_channel_io_normal;
-
-	status = broadcast_error_message(dev,
-			state,
-			"error_detected",
-			report_error_detected);
-
-	if (severity == AER_FATAL) {
-		result = reset_link(dev);
-		if (result != PCI_ERS_RESULT_RECOVERED)
-			goto failed;
-	}
-
-	if (status == PCI_ERS_RESULT_CAN_RECOVER)
-		status = broadcast_error_message(dev,
-				state,
-				"mmio_enabled",
-				report_mmio_enabled);
-
-	if (status == PCI_ERS_RESULT_NEED_RESET) {
-		/*
-		 * TODO: Should call platform-specific
-		 * functions to reset slot before calling
-		 * drivers' slot_reset callbacks?
-		 */
-		status = broadcast_error_message(dev,
-				state,
-				"slot_reset",
-				report_slot_reset);
-	}
-
-	if (status != PCI_ERS_RESULT_RECOVERED)
-		goto failed;
-
-	broadcast_error_message(dev,
-				state,
-				"resume",
-				report_resume);
-
-	pci_info(dev, "AER: Device recovery successful\n");
-	return;
-
-failed:
-	pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
-	/* TODO: Should kernel panic here? */
-	pci_info(dev, "AER: Device recovery failed\n");
-}
-
 /**
  * handle_error_source - handle logging error into an event log
- * @aerdev: pointer to pcie_device data structure of the root port
  * @dev: pointer to pci_dev data structure of error source device
  * @info: comprehensive error information
  *
  * Invoked when an error being detected by Root Port.
  */
-static void handle_error_source(struct pcie_device *aerdev,
-	struct pci_dev *dev,
-	struct aer_err_info *info)
+static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
 {
 	int pos;
 
@@ -562,12 +248,13 @@ static void handle_error_source(struct pcie_device *aerdev,
 		if (pos)
 			pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
 					info->status);
-	} else
-		do_recovery(dev, info->severity);
+	} else if (info->severity == AER_NONFATAL)
+		pcie_do_nonfatal_recovery(dev);
+	else if (info->severity == AER_FATAL)
+		pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER);
 }
 
 #ifdef CONFIG_ACPI_APEI_PCIEAER
-static void aer_recover_work_func(struct work_struct *work);
 
 #define AER_RECOVER_RING_ORDER		4
 #define AER_RECOVER_RING_SIZE		(1 << AER_RECOVER_RING_ORDER)
@@ -582,6 +269,30 @@ struct aer_recover_entry {
 
 static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
 		    AER_RECOVER_RING_SIZE);
+
+static void aer_recover_work_func(struct work_struct *work)
+{
+	struct aer_recover_entry entry;
+	struct pci_dev *pdev;
+
+	while (kfifo_get(&aer_recover_ring, &entry)) {
+		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
+						   entry.devfn);
+		if (!pdev) {
+			pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
+			       entry.domain, entry.bus,
+			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
+			continue;
+		}
+		cper_print_aer(pdev, entry.severity, entry.regs);
+		if (entry.severity == AER_NONFATAL)
+			pcie_do_nonfatal_recovery(pdev);
+		else if (entry.severity == AER_FATAL)
+			pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_AER);
+		pci_dev_put(pdev);
+	}
+}
+
 /*
  * Mutual exclusion for writers of aer_recover_ring, reader side don't
  * need lock, because there is only one reader and lock is not needed
@@ -611,27 +322,6 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
 	spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
 }
 EXPORT_SYMBOL_GPL(aer_recover_queue);
-
-static void aer_recover_work_func(struct work_struct *work)
-{
-	struct aer_recover_entry entry;
-	struct pci_dev *pdev;
-
-	while (kfifo_get(&aer_recover_ring, &entry)) {
-		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
-						   entry.devfn);
-		if (!pdev) {
-			pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
-			       entry.domain, entry.bus,
-			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
-			continue;
-		}
-		cper_print_aer(pdev, entry.severity, entry.regs);
-		if (entry.severity != AER_CORRECTABLE)
-			do_recovery(pdev, entry.severity);
-		pci_dev_put(pdev);
-	}
-}
 #endif
 
 /**
@@ -695,8 +385,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
 	return 1;
 }
 
-static inline void aer_process_err_devices(struct pcie_device *p_device,
-			struct aer_err_info *e_info)
+static inline void aer_process_err_devices(struct aer_err_info *e_info)
 {
 	int i;
 
@@ -707,19 +396,19 @@ static inline void aer_process_err_devices(struct pcie_device *p_device,
 	}
 	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
 		if (get_device_error_info(e_info->dev[i], e_info))
-			handle_error_source(p_device, e_info->dev[i], e_info);
+			handle_error_source(e_info->dev[i], e_info);
 	}
 }
 
 /**
  * aer_isr_one_error - consume an error detected by root port
- * @p_device: pointer to error root port service device
+ * @rpc: pointer to the root port which holds an error
  * @e_src: pointer to an error source
  */
-static void aer_isr_one_error(struct pcie_device *p_device,
+static void aer_isr_one_error(struct aer_rpc *rpc,
 		struct aer_err_source *e_src)
 {
-	struct aer_rpc *rpc = get_service_data(p_device);
+	struct pci_dev *pdev = rpc->rpd;
 	struct aer_err_info *e_info = &rpc->e_info;
 
 	/*
@@ -734,11 +423,10 @@ static void aer_isr_one_error(struct pcie_device *p_device,
 			e_info->multi_error_valid = 1;
 		else
 			e_info->multi_error_valid = 0;
+		aer_print_port_info(pdev, e_info);
 
-		aer_print_port_info(p_device->port, e_info);
-
-		if (find_source_device(p_device->port, e_info))
-			aer_process_err_devices(p_device, e_info);
+		if (find_source_device(pdev, e_info))
+			aer_process_err_devices(e_info);
 	}
 
 	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
@@ -754,10 +442,10 @@ static void aer_isr_one_error(struct pcie_device *p_device,
 		else
 			e_info->multi_error_valid = 0;
 
-		aer_print_port_info(p_device->port, e_info);
+		aer_print_port_info(pdev, e_info);
 
-		if (find_source_device(p_device->port, e_info))
-			aer_process_err_devices(p_device, e_info);
+		if (find_source_device(pdev, e_info))
+			aer_process_err_devices(e_info);
 	}
 }
 
@@ -799,11 +487,10 @@ static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
 void aer_isr(struct work_struct *work)
 {
 	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
-	struct pcie_device *p_device = rpc->rpd;
 	struct aer_err_source uninitialized_var(e_src);
 
 	mutex_lock(&rpc->rpc_mutex);
 	while (get_e_source(rpc, &e_src))
-		aer_isr_one_error(p_device, &e_src);
+		aer_isr_one_error(rpc, &e_src);
 	mutex_unlock(&rpc->rpc_mutex);
 }
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index cfc89dd57831..4985bdf64c2e 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -163,17 +163,17 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
 	int id = ((dev->bus->number << 8) | dev->devfn);
 
 	if (!info->status) {
-		pci_err(dev, "PCIe Bus Error: severity=%s, type=Unaccessible, id=%04x(Unregistered Agent ID)\n",
-			aer_error_severity_string[info->severity], id);
+		pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
+			aer_error_severity_string[info->severity]);
 		goto out;
 	}
 
 	layer = AER_GET_LAYER_ERROR(info->severity, info->status);
 	agent = AER_GET_AGENT(info->severity, info->status);
 
-	pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+	pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
 		aer_error_severity_string[info->severity],
-		aer_error_layer[layer], id, aer_agent_string[agent]);
+		aer_error_layer[layer], aer_agent_string[agent]);
 
 	pci_err(dev, "  device [%04x:%04x] error status/mask=%08x/%08x\n",
 		dev->vendor, dev->device,
@@ -186,17 +186,21 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
 
 out:
 	if (info->id && info->error_dev_num > 1 && info->id == id)
-		pci_err(dev, "  Error of this Agent(%04x) is reported first\n", id);
+		pci_err(dev, "  Error of this Agent is reported first\n");
 
 	trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
-			info->severity);
+			info->severity, info->tlp_header_valid, &info->tlp);
 }
 
 void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
 {
-	pci_info(dev, "AER: %s%s error received: id=%04x\n",
+	u8 bus = info->id >> 8;
+	u8 devfn = info->id & 0xff;
+
+	pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n",
 		info->multi_error_valid ? "Multiple " : "",
-		aer_error_severity_string[info->severity], info->id);
+		aer_error_severity_string[info->severity],
+		pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 }
 
 #ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -216,28 +220,30 @@ EXPORT_SYMBOL_GPL(cper_severity_to_aer);
 void cper_print_aer(struct pci_dev *dev, int aer_severity,
 		    struct aer_capability_regs *aer)
 {
-	int layer, agent, status_strs_size, tlp_header_valid = 0;
+	int layer, agent, tlp_header_valid = 0;
 	u32 status, mask;
-	const char **status_strs;
+	struct aer_err_info info;
 
 	if (aer_severity == AER_CORRECTABLE) {
 		status = aer->cor_status;
 		mask = aer->cor_mask;
-		status_strs = aer_correctable_error_string;
-		status_strs_size = ARRAY_SIZE(aer_correctable_error_string);
 	} else {
 		status = aer->uncor_status;
 		mask = aer->uncor_mask;
-		status_strs = aer_uncorrectable_error_string;
-		status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string);
 		tlp_header_valid = status & AER_LOG_TLP_MASKS;
 	}
 
 	layer = AER_GET_LAYER_ERROR(aer_severity, status);
 	agent = AER_GET_AGENT(aer_severity, status);
 
+	memset(&info, 0, sizeof(info));
+	info.severity = aer_severity;
+	info.status = status;
+	info.mask = mask;
+	info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);
+
 	pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
-	cper_print_bits("", status, status_strs, status_strs_size);
+	__aer_print_error(dev, &info);
 	pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
 		aer_error_layer[layer], aer_agent_string[agent]);
 
@@ -249,6 +255,6 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity,
 		__print_tlp_header(dev, &aer->header_log);
 
 	trace_aer_event(dev_name(&dev->dev), (status & ~mask),
-			aer_severity);
+			aer_severity, tlp_header_valid, &aer->header_log);
 }
 #endif
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index f76eb7704f64..c687c817b47d 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -400,6 +400,15 @@ static void pcie_get_aspm_reg(struct pci_dev *pdev,
 		info->l1ss_cap = 0;
 		return;
 	}
+
+	/*
+	 * If we don't have LTR for the entire path from the Root Complex
+	 * to this device, we can't use ASPM L1.2 because it relies on the
+	 * LTR_L1.2_THRESHOLD.  See PCIe r4.0, secs 5.5.4, 6.18.
+	 */
+	if (!pdev->ltr_path)
+		info->l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
+
 	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
 			      &info->l1ss_ctl1);
 	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 8c57d607e603..d6436681c535 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -68,44 +68,35 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
 
 static void dpc_wait_link_inactive(struct dpc_dev *dpc)
 {
-	unsigned long timeout = jiffies + HZ;
 	struct pci_dev *pdev = dpc->dev->port;
-	struct device *dev = &dpc->dev->device;
-	u16 lnk_status;
 
-	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
-	while (lnk_status & PCI_EXP_LNKSTA_DLLLA &&
-					!time_after(jiffies, timeout)) {
-		msleep(10);
-		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
-	}
-	if (lnk_status & PCI_EXP_LNKSTA_DLLLA)
-		dev_warn(dev, "Link state not disabled for DPC event\n");
+	pcie_wait_for_link(pdev, false);
 }
 
-static void dpc_work(struct work_struct *work)
+static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
 {
-	struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
-	struct pci_dev *dev, *temp, *pdev = dpc->dev->port;
-	struct pci_bus *parent = pdev->subordinate;
-	u16 cap = dpc->cap_pos, ctl;
-
-	pci_lock_rescan_remove();
-	list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
-					 bus_list) {
-		pci_dev_get(dev);
-		pci_dev_set_disconnected(dev, NULL);
-		if (pci_has_subordinate(dev))
-			pci_walk_bus(dev->subordinate,
-				     pci_dev_set_disconnected, NULL);
-		pci_stop_and_remove_bus_device(dev);
-		pci_dev_put(dev);
-	}
-	pci_unlock_rescan_remove();
-
+	struct dpc_dev *dpc;
+	struct pcie_device *pciedev;
+	struct device *devdpc;
+	u16 cap, ctl;
+
+	/*
+	 * DPC disables the Link automatically in hardware, so it has
+	 * already been reset by the time we get here.
+	 */
+	devdpc = pcie_port_find_device(pdev, PCIE_PORT_SERVICE_DPC);
+	pciedev = to_pcie_device(devdpc);
+	dpc = get_service_data(pciedev);
+	cap = dpc->cap_pos;
+
+	/*
+	 * Wait until the Link is inactive, then clear DPC Trigger Status
+	 * to allow the Port to leave DPC.
+	 */
 	dpc_wait_link_inactive(dpc);
+
 	if (dpc->rp_extensions && dpc_wait_rp_inactive(dpc))
-		return;
+		return PCI_ERS_RESULT_DISCONNECT;
 	if (dpc->rp_extensions && dpc->rp_pio_status) {
 		pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS,
 				       dpc->rp_pio_status);
@@ -113,11 +104,22 @@ static void dpc_work(struct work_struct *work)
 	}
 
 	pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
-		PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT);
+			      PCI_EXP_DPC_STATUS_TRIGGER);
 
 	pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl);
 	pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL,
 			      ctl | PCI_EXP_DPC_CTL_INT_EN);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void dpc_work(struct work_struct *work)
+{
+	struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
+	struct pci_dev *pdev = dpc->dev->port;
+
+	/* We configure DPC so it only triggers on ERR_FATAL */
+	pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
 }
 
 static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
@@ -223,6 +225,9 @@ static irqreturn_t dpc_irq(int irq, void *context)
 	if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
 		dpc_process_rp_pio_error(dpc);
 
+	pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
+			      PCI_EXP_DPC_STATUS_INTERRUPT);
+
 	schedule_work(&dpc->work);
 
 	return IRQ_HANDLED;
@@ -270,7 +275,7 @@ static int dpc_probe(struct pcie_device *dev)
 		}
 	}
 
-	ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
+	ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
 	pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
 
 	dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
@@ -288,7 +293,7 @@ static void dpc_remove(struct pcie_device *dev)
 	u16 ctl;
 
 	pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
-	ctl &= ~(PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN);
+	ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
 	pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
 }
 
@@ -298,6 +303,7 @@ static struct pcie_port_service_driver dpcdriver = {
 	.service	= PCIE_PORT_SERVICE_DPC,
 	.probe		= dpc_probe,
 	.remove		= dpc_remove,
+	.reset_link	= dpc_reset_link,
 };
 
 static int __init dpc_service_init(void)
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
new file mode 100644
index 000000000000..f7ce0cb0b0b7
--- /dev/null
+++ b/drivers/pci/pcie/err.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file implements the error recovery as a core part of PCIe error
+ * reporting. When a PCIe error is delivered, an error message will be
+ * collected and printed to console, then, an error recovery procedure
+ * will be executed by following the PCI error recovery rules.
+ *
+ * Copyright (C) 2006 Intel Corp.
+ *	Tom Long Nguyen (tom.l.nguyen@intel.com)
+ *	Zhang Yanmin (yanmin.zhang@intel.com)
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/aer.h>
+#include "portdrv.h"
+#include "../pci.h"
+
+struct aer_broadcast_data {
+	enum pci_channel_state state;
+	enum pci_ers_result result;
+};
+
+static pci_ers_result_t merge_result(enum pci_ers_result orig,
+				  enum pci_ers_result new)
+{
+	if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
+		return PCI_ERS_RESULT_NO_AER_DRIVER;
+
+	if (new == PCI_ERS_RESULT_NONE)
+		return orig;
+
+	switch (orig) {
+	case PCI_ERS_RESULT_CAN_RECOVER:
+	case PCI_ERS_RESULT_RECOVERED:
+		orig = new;
+		break;
+	case PCI_ERS_RESULT_DISCONNECT:
+		if (new == PCI_ERS_RESULT_NEED_RESET)
+			orig = PCI_ERS_RESULT_NEED_RESET;
+		break;
+	default:
+		break;
+	}
+
+	return orig;
+}
+
+static int report_error_detected(struct pci_dev *dev, void *data)
+{
+	pci_ers_result_t vote;
+	const struct pci_error_handlers *err_handler;
+	struct aer_broadcast_data *result_data;
+
+	result_data = (struct aer_broadcast_data *) data;
+
+	device_lock(&dev->dev);
+	dev->error_state = result_data->state;
+
+	if (!dev->driver ||
+		!dev->driver->err_handler ||
+		!dev->driver->err_handler->error_detected) {
+		if (result_data->state == pci_channel_io_frozen &&
+			dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
+			/*
+			 * In case of fatal recovery, if one of the
+			 * downstream devices has no driver, we might
+			 * be unable to recover because a later insmod
+			 * of a driver for this device is unaware of
+			 * its hw state.
+			 */
+			pci_printk(KERN_DEBUG, dev, "device has %s\n",
+				   dev->driver ?
+				   "no AER-aware driver" : "no driver");
+		}
+
+		/*
+		 * If there's any device in the subtree that does not
+		 * have an error_detected callback, returning
+		 * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
+		 * the subsequent mmio_enabled/slot_reset/resume
+		 * callbacks of "any" device in the subtree. All the
+		 * devices in the subtree are left in the error state
+		 * without recovery.
+		 */
+
+		if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
+			vote = PCI_ERS_RESULT_NO_AER_DRIVER;
+		else
+			vote = PCI_ERS_RESULT_NONE;
+	} else {
+		err_handler = dev->driver->err_handler;
+		vote = err_handler->error_detected(dev, result_data->state);
+		pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
+	}
+
+	result_data->result = merge_result(result_data->result, vote);
+	device_unlock(&dev->dev);
+	return 0;
+}
+
+static int report_mmio_enabled(struct pci_dev *dev, void *data)
+{
+	pci_ers_result_t vote;
+	const struct pci_error_handlers *err_handler;
+	struct aer_broadcast_data *result_data;
+
+	result_data = (struct aer_broadcast_data *) data;
+
+	device_lock(&dev->dev);
+	if (!dev->driver ||
+		!dev->driver->err_handler ||
+		!dev->driver->err_handler->mmio_enabled)
+		goto out;
+
+	err_handler = dev->driver->err_handler;
+	vote = err_handler->mmio_enabled(dev);
+	result_data->result = merge_result(result_data->result, vote);
+out:
+	device_unlock(&dev->dev);
+	return 0;
+}
+
+static int report_slot_reset(struct pci_dev *dev, void *data)
+{
+	pci_ers_result_t vote;
+	const struct pci_error_handlers *err_handler;
+	struct aer_broadcast_data *result_data;
+
+	result_data = (struct aer_broadcast_data *) data;
+
+	device_lock(&dev->dev);
+	if (!dev->driver ||
+		!dev->driver->err_handler ||
+		!dev->driver->err_handler->slot_reset)
+		goto out;
+
+	err_handler = dev->driver->err_handler;
+	vote = err_handler->slot_reset(dev);
+	result_data->result = merge_result(result_data->result, vote);
+out:
+	device_unlock(&dev->dev);
+	return 0;
+}
+
+static int report_resume(struct pci_dev *dev, void *data)
+{
+	const struct pci_error_handlers *err_handler;
+
+	device_lock(&dev->dev);
+	dev->error_state = pci_channel_io_normal;
+
+	if (!dev->driver ||
+		!dev->driver->err_handler ||
+		!dev->driver->err_handler->resume)
+		goto out;
+
+	err_handler = dev->driver->err_handler;
+	err_handler->resume(dev);
+	pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
+out:
+	device_unlock(&dev->dev);
+	return 0;
+}
+
+/**
+ * default_reset_link - default reset function
+ * @dev: pointer to pci_dev data structure
+ *
+ * Invoked when performing link reset on a Downstream Port or a
+ * Root Port with no aer driver.
+ */
+static pci_ers_result_t default_reset_link(struct pci_dev *dev)
+{
+	pci_reset_bridge_secondary_bus(dev);
+	pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
+{
+	struct pci_dev *udev;
+	pci_ers_result_t status;
+	struct pcie_port_service_driver *driver = NULL;
+
+	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+		/* Reset this port for all subordinates */
+		udev = dev;
+	} else {
+		/* Reset the upstream component (likely downstream port) */
+		udev = dev->bus->self;
+	}
+
+	/* Use the port service driver of the component first */
+	driver = pcie_port_find_service(udev, service);
+
+	if (driver && driver->reset_link) {
+		status = driver->reset_link(udev);
+	} else if (udev->has_secondary_link) {
+		status = default_reset_link(udev);
+	} else {
+		pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
+			pci_name(udev));
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	if (status != PCI_ERS_RESULT_RECOVERED) {
+		pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
+			pci_name(udev));
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return status;
+}
+
+/**
+ * broadcast_error_message - handle message broadcast to downstream drivers
+ * @dev: pointer to the device from which the message is broadcast down the hierarchy
+ * @state: error state
+ * @error_mesg: message to print
+ * @cb: callback to be broadcasted
+ *
+ * Invoked during the error recovery process. Broadcasts the error severity
+ * to all downstream drivers in the hierarchy in question and merges their
+ * responses into a single result.
+ */
+static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
+	enum pci_channel_state state,
+	char *error_mesg,
+	int (*cb)(struct pci_dev *, void *))
+{
+	struct aer_broadcast_data result_data;
+
+	pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
+	result_data.state = state;
+	if (cb == report_error_detected)
+		result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
+	else
+		result_data.result = PCI_ERS_RESULT_RECOVERED;
+
+	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+		/*
+		 * If the error is reported by a bridge, we think this error
+		 * is related to the downstream link of the bridge, so we
+		 * do error recovery on all subordinates of the bridge instead
+		 * of the bridge and clear the error status of the bridge.
+		 */
+		if (cb == report_error_detected)
+			dev->error_state = state;
+		pci_walk_bus(dev->subordinate, cb, &result_data);
+		if (cb == report_resume) {
+			pci_cleanup_aer_uncorrect_error_status(dev);
+			dev->error_state = pci_channel_io_normal;
+		}
+	} else {
+		/*
+		 * If the error is reported by an end point, we think this
+		 * error is related to the upstream link of the end point.
+		 */
+		if (state == pci_channel_io_normal)
+			/*
+			 * The error is nonfatal so the bus is OK; just invoke
+			 * the callback for the function that logged the error.
+			 */
+			cb(dev, &result_data);
+		else
+			pci_walk_bus(dev->bus, cb, &result_data);
+	}
+
+	return result_data.result;
+}
+
+/**
+ * pcie_do_fatal_recovery - handle fatal error recovery process
+ * @dev: pointer to a pci_dev data structure of agent detecting an error
+ *
+ * Invoked when an error is fatal. Removes the devices beneath this AER
+ * agent, resets the link (e.g. by a Secondary Bus Reset), and then
+ * re-enumerates the devices.
+ */
+void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
+{
+	struct pci_dev *udev;
+	struct pci_bus *parent;
+	struct pci_dev *pdev, *temp;
+	pci_ers_result_t result;
+
+	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+		udev = dev;
+	else
+		udev = dev->bus->self;
+
+	parent = udev->subordinate;
+	pci_lock_rescan_remove();
+	list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
+					 bus_list) {
+		pci_dev_get(pdev);
+		pci_dev_set_disconnected(pdev, NULL);
+		if (pci_has_subordinate(pdev))
+			pci_walk_bus(pdev->subordinate,
+				     pci_dev_set_disconnected, NULL);
+		pci_stop_and_remove_bus_device(pdev);
+		pci_dev_put(pdev);
+	}
+
+	result = reset_link(udev, service);
+
+	if ((service == PCIE_PORT_SERVICE_AER) &&
+	    (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
+		/*
+		 * If the error is reported by a bridge, we think this error
+		 * is related to the downstream link of the bridge, so we
+		 * do error recovery on all subordinates of the bridge instead
+		 * of the bridge and clear the error status of the bridge.
+		 */
+		pci_cleanup_aer_uncorrect_error_status(dev);
+	}
+
+	if (result == PCI_ERS_RESULT_RECOVERED) {
+		if (pcie_wait_for_link(udev, true))
+			pci_rescan_bus(udev->bus);
+		pci_info(dev, "Device recovery from fatal error successful\n");
+	} else {
+		pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
+		pci_info(dev, "Device recovery from fatal error failed\n");
+	}
+
+	pci_unlock_rescan_remove();
+}
+
+/**
+ * pcie_do_nonfatal_recovery - handle nonfatal error recovery process
+ * @dev: pointer to a pci_dev data structure of agent detecting an error
+ *
+ * Invoked when an error is nonfatal. Broadcasts an error_detected message
+ * to all downstream drivers within the hierarchy in question and, based
+ * on their responses, steps through the remaining recovery callbacks.
+ */
+void pcie_do_nonfatal_recovery(struct pci_dev *dev)
+{
+	pci_ers_result_t status;
+	enum pci_channel_state state;
+
+	state = pci_channel_io_normal;
+
+	status = broadcast_error_message(dev,
+			state,
+			"error_detected",
+			report_error_detected);
+
+	if (status == PCI_ERS_RESULT_CAN_RECOVER)
+		status = broadcast_error_message(dev,
+				state,
+				"mmio_enabled",
+				report_mmio_enabled);
+
+	if (status == PCI_ERS_RESULT_NEED_RESET) {
+		/*
+		 * TODO: Should call platform-specific
+		 * functions to reset slot before calling
+		 * drivers' slot_reset callbacks?
+		 */
+		status = broadcast_error_message(dev,
+				state,
+				"slot_reset",
+				report_slot_reset);
+	}
+
+	if (status != PCI_ERS_RESULT_RECOVERED)
+		goto failed;
+
+	broadcast_error_message(dev,
+				state,
+				"resume",
+				report_resume);
+
+	pci_info(dev, "AER: Device recovery successful\n");
+	return;
+
+failed:
+	pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
+
+	/* TODO: Should kernel panic here? */
+	pci_info(dev, "AER: Device recovery failed\n");
+}
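
A minimal sketch of how an error source might dispatch into the two entry points added above once it has classified an error's severity; the dispatcher and its name are hypothetical, only pcie_do_fatal_recovery(), pcie_do_nonfatal_recovery() and PCIE_PORT_SERVICE_AER come from this series, with their declarations assumed visible via drivers/pci/pci.h and portdrv.h:

	/* Hypothetical dispatcher, for illustration only -- not part of this patch */
	static void pcie_dispatch_uncorrectable_error(struct pci_dev *dev, bool fatal)
	{
		if (fatal)
			/* Remove devices, reset the link, then re-enumerate (err.c above) */
			pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER);
		else
			/* Walk the error_detected/mmio_enabled/slot_reset/resume callbacks */
			pcie_do_nonfatal_recovery(dev);
	}

This mirrors how dpc.c above hands ERR_FATAL events to pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC) instead of carrying its own remove/re-enumerate loop.
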
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index d0c6783dbfe3..2bb5db7b53e6 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -11,8 +11,6 @@
 
 #include <linux/compiler.h>
 
-extern bool pcie_ports_native;
-
 /* Service Type */
 #define PCIE_PORT_SERVICE_PME_SHIFT	0	/* Power Management Event */
 #define PCIE_PORT_SERVICE_PME		(1 << PCIE_PORT_SERVICE_PME_SHIFT)
@@ -112,4 +110,7 @@ static inline bool pcie_pme_no_msi(void) { return false; }
 static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
 #endif /* !CONFIG_PCIE_PME */
 
+struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
+							u32 service);
+struct device *pcie_port_find_device(struct pci_dev *dev, u32 service);
 #endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
deleted file mode 100644
index 8ab5d434b9c6..000000000000
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PCIe Port Native Services Support, ACPI-Related Part
- *
- * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- */
-
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
-
-#include "aer/aerdrv.h"
-#include "../pci.h"
-#include "portdrv.h"
-
-/**
- * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
- * @port: PCIe Port service for a root port or event collector.
- * @srv_mask: Bit mask of services that can be enabled for @port.
- *
- * Invoked when @port is identified as a PCIe port device.  To avoid conflicts
- * with the BIOS PCIe port native services support requires the BIOS to yield
- * control of these services to the kernel.  The mask of services that the BIOS
- * allows to be enabled for @port is written to @srv_mask.
- *
- * NOTE: It turns out that we cannot do that for individual port services
- * separately, because that would make some systems work incorrectly.
- */
-void pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
-{
-	struct acpi_pci_root *root;
-	acpi_handle handle;
-	u32 flags;
-
-	if (acpi_pci_disabled)
-		return;
-
-	handle = acpi_find_root_bridge_handle(port);
-	if (!handle)
-		return;
-
-	root = acpi_pci_find_root(handle);
-	if (!root)
-		return;
-
-	flags = root->osc_control_set;
-
-	*srv_mask = 0;
-	if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
-		*srv_mask |= PCIE_PORT_SERVICE_HP;
-	if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
-		*srv_mask |= PCIE_PORT_SERVICE_PME;
-	if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
-		*srv_mask |= PCIE_PORT_SERVICE_AER | PCIE_PORT_SERVICE_DPC;
-}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index c9c0663db282..e0261ad4bcdd 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -19,6 +19,12 @@
 #include "../pci.h"
 #include "portdrv.h"
 
+struct portdrv_service_data {
+	struct pcie_port_service_driver *drv;
+	struct device *dev;
+	u32 service;
+};
+
 /**
  * release_pcie_device - free PCI Express port service device structure
  * @dev: Port service device to release
@@ -199,7 +205,7 @@ static int get_port_device_capability(struct pci_dev *dev)
 	int services = 0;
 
 	if (dev->is_hotplug_bridge &&
-	    (pcie_ports_native || host->native_hotplug)) {
+	    (pcie_ports_native || host->native_pcie_hotplug)) {
 		services |= PCIE_PORT_SERVICE_HP;
 
 		/*
@@ -398,6 +404,69 @@ static int remove_iter(struct device *dev, void *data)
 	return 0;
 }
 
+static int find_service_iter(struct device *device, void *data)
+{
+	struct pcie_port_service_driver *service_driver;
+	struct portdrv_service_data *pdrvs;
+	u32 service;
+
+	pdrvs = (struct portdrv_service_data *) data;
+	service = pdrvs->service;
+
+	if (device->bus == &pcie_port_bus_type && device->driver) {
+		service_driver = to_service_driver(device->driver);
+		if (service_driver->service == service) {
+			pdrvs->drv = service_driver;
+			pdrvs->dev = device;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * pcie_port_find_service - find the service driver
+ * @dev: PCI Express port the service is associated with
+ * @service: Service to find
+ *
+ * Find the PCI Express port service driver associated with the given service.
+ */
+struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
+							u32 service)
+{
+	struct pcie_port_service_driver *drv;
+	struct portdrv_service_data pdrvs;
+
+	pdrvs.drv = NULL;
+	pdrvs.service = service;
+	device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
+
+	drv = pdrvs.drv;
+	return drv;
+}
+
+/**
+ * pcie_port_find_device - find the struct device
+ * @dev: PCI Express port the service is associated with
+ * @service: Service whose device to find
+ *
+ * Find the struct device associated with the given service on a pci_dev.
+ */
+struct device *pcie_port_find_device(struct pci_dev *dev,
+				      u32 service)
+{
+	struct device *device;
+	struct portdrv_service_data pdrvs;
+
+	pdrvs.dev = NULL;
+	pdrvs.service = service;
+	device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
+
+	device = pdrvs.dev;
+	return device;
+}
+
 /**
  * pcie_port_device_remove - unregister PCI Express port service devices
  * @dev: PCI Express port the service devices to unregister are associated with
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ac91b6fd0bcd..ac876e32de4b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -526,12 +526,14 @@ static void devm_pci_release_host_bridge_dev(struct device *dev)
 
 	if (bridge->release_fn)
 		bridge->release_fn(bridge);
+
+	pci_free_resource_list(&bridge->windows);
 }
 
 static void pci_release_host_bridge_dev(struct device *dev)
 {
 	devm_pci_release_host_bridge_dev(dev);
-	pci_free_host_bridge(to_pci_host_bridge(dev));
+	kfree(to_pci_host_bridge(dev));
 }
 
 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
@@ -552,8 +554,10 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
 	 * OS from interfering.
 	 */
 	bridge->native_aer = 1;
-	bridge->native_hotplug = 1;
+	bridge->native_pcie_hotplug = 1;
+	bridge->native_shpc_hotplug = 1;
 	bridge->native_pme = 1;
+	bridge->native_ltr = 1;
 
 	return bridge;
 }
@@ -882,6 +886,45 @@ free:
 	return err;
 }
 
+static bool pci_bridge_child_ext_cfg_accessible(struct pci_dev *bridge)
+{
+	int pos;
+	u32 status;
+
+	/*
+	 * If extended config space isn't accessible on a bridge's primary
+	 * bus, we certainly can't access it on the secondary bus.
+	 */
+	if (bridge->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
+		return false;
+
+	/*
+	 * PCIe Root Ports and switch ports are PCIe on both sides, so if
+	 * extended config space is accessible on the primary, it's also
+	 * accessible on the secondary.
+	 */
+	if (pci_is_pcie(bridge) &&
+	    (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT ||
+	     pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM ||
+	     pci_pcie_type(bridge) == PCI_EXP_TYPE_DOWNSTREAM))
+		return true;
+
+	/*
+	 * For the other bridge types:
+	 *   - PCI-to-PCI bridges
+	 *   - PCIe-to-PCI/PCI-X forward bridges
+	 *   - PCI/PCI-X-to-PCIe reverse bridges
+	 * extended config space on the secondary side is only accessible
+	 * if the bridge supports PCI-X Mode 2.
+	 */
+	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
+	if (!pos)
+		return false;
+
+	pci_read_config_dword(bridge, pos + PCI_X_STATUS, &status);
+	return status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ);
+}
+
 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
 					   struct pci_dev *bridge, int busnr)
 {
@@ -923,6 +966,16 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
 	pci_set_bus_of_node(child);
 	pci_set_bus_speed(child);
 
+	/*
+	 * Check whether extended config space is accessible on the child
+	 * bus.  Note that we currently assume it is always accessible on
+	 * the root bus.
+	 */
+	if (!pci_bridge_child_ext_cfg_accessible(bridge)) {
+		child->bus_flags |= PCI_BUS_FLAGS_NO_EXTCFG;
+		pci_info(child, "extended config space not accessible\n");
+	}
+
 	/* Set up default resource pointers and names */
 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
 		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
@@ -998,6 +1051,8 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
  * already configured by the BIOS and after we are done with all of
  * them, we proceed to assigning numbers to the remaining buses in
  * order to avoid overlaps between old and new bus numbers.
+ *
+ * Return: New subordinate number covering all buses behind this bridge.
  */
 static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
 				  int max, unsigned int available_buses,
@@ -1188,20 +1243,15 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
 		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
 		pci_domain_nr(bus), child->number);
 
-	/* Has only triggered on CardBus, fixup is in yenta_socket */
+	/* Check that all devices are accessible */
 	while (bus->parent) {
 		if ((child->busn_res.end > bus->busn_res.end) ||
 		    (child->number > bus->busn_res.end) ||
 		    (child->number < bus->number) ||
 		    (child->busn_res.end < bus->number)) {
-			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
-				&child->busn_res,
-				(bus->number > child->busn_res.end &&
-				 bus->busn_res.end < child->number) ?
-					"wholly" : "partially",
-				bus->self->transparent ? " transparent" : "",
-				dev_name(&bus->dev),
-				&bus->busn_res);
+			dev_info(&dev->dev, "devices behind bridge are unusable because %pR cannot be assigned for them\n",
+				 &child->busn_res);
+			break;
 		}
 		bus = bus->parent;
 	}
@@ -1230,6 +1280,8 @@ out:
  * already configured by the BIOS and after we are done with all of
  * them, we proceed to assigning numbers to the remaining buses in
  * order to avoid overlaps between old and new bus numbers.
+ *
+ * Return: New subordinate number covering all buses behind this bridge.
  */
 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
 {
@@ -1393,6 +1445,9 @@ int pci_cfg_space_size(struct pci_dev *dev)
 	u32 status;
 	u16 class;
 
+	if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
+		return PCI_CFG_SPACE_SIZE;
+
 	class = dev->class >> 8;
 	if (class == PCI_CLASS_BRIDGE_HOST)
 		return pci_cfg_space_size_ext(dev);
@@ -1954,9 +2009,13 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
 static void pci_configure_ltr(struct pci_dev *dev)
 {
 #ifdef CONFIG_PCIEASPM
+	struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
 	u32 cap;
 	struct pci_dev *bridge;
 
+	if (!host->native_ltr)
+		return;
+
 	if (!pci_is_pcie(dev))
 		return;
 
@@ -2638,7 +2697,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
 	for_each_pci_bridge(dev, bus) {
 		cmax = max;
 		max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
-		used_buses += cmax - max;
+
+		/*
+		 * Reserve one bus for each bridge now to avoid extending
+		 * hotplug bridges too much during the second scan below.
+		 */
+		used_buses++;
+		if (cmax - max > 1)
+			used_buses += cmax - max - 1;
 	}
 
 	/* Scan bridges that need to be reconfigured */
@@ -2661,12 +2727,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
 			 * bridges if any.
 			 */
 			buses = available_buses / hotplug_bridges;
-			buses = min(buses, available_buses - used_buses);
+			buses = min(buses, available_buses - used_buses + 1);
 		}
 
 		cmax = max;
 		max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
-		used_buses += max - cmax;
+		/* One bus is already accounted so don't add it again */
+		if (max - cmax > 1)
+			used_buses += max - cmax - 1;
 	}
 
 	/*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2990ad1e7c99..f439de848658 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -30,6 +30,162 @@
 #include <asm/dma.h>	/* isa_dma_bridge_buggy */
 #include "pci.h"
 
+static ktime_t fixup_debug_start(struct pci_dev *dev,
+				 void (*fn)(struct pci_dev *dev))
+{
+	if (initcall_debug)
+		pci_info(dev, "calling  %pF @ %i\n", fn, task_pid_nr(current));
+
+	return ktime_get();
+}
+
+static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
+			       void (*fn)(struct pci_dev *dev))
+{
+	ktime_t delta, rettime;
+	unsigned long long duration;
+
+	rettime = ktime_get();
+	delta = ktime_sub(rettime, calltime);
+	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+	if (initcall_debug || duration > 10000)
+		pci_info(dev, "%pF took %lld usecs\n", fn, duration);
+}
+
+static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+			  struct pci_fixup *end)
+{
+	ktime_t calltime;
+
+	for (; f < end; f++)
+		if ((f->class == (u32) (dev->class >> f->class_shift) ||
+		     f->class == (u32) PCI_ANY_ID) &&
+		    (f->vendor == dev->vendor ||
+		     f->vendor == (u16) PCI_ANY_ID) &&
+		    (f->device == dev->device ||
+		     f->device == (u16) PCI_ANY_ID)) {
+			calltime = fixup_debug_start(dev, f->hook);
+			f->hook(dev);
+			fixup_debug_report(dev, calltime, f->hook);
+		}
+}
+
+extern struct pci_fixup __start_pci_fixups_early[];
+extern struct pci_fixup __end_pci_fixups_early[];
+extern struct pci_fixup __start_pci_fixups_header[];
+extern struct pci_fixup __end_pci_fixups_header[];
+extern struct pci_fixup __start_pci_fixups_final[];
+extern struct pci_fixup __end_pci_fixups_final[];
+extern struct pci_fixup __start_pci_fixups_enable[];
+extern struct pci_fixup __end_pci_fixups_enable[];
+extern struct pci_fixup __start_pci_fixups_resume[];
+extern struct pci_fixup __end_pci_fixups_resume[];
+extern struct pci_fixup __start_pci_fixups_resume_early[];
+extern struct pci_fixup __end_pci_fixups_resume_early[];
+extern struct pci_fixup __start_pci_fixups_suspend[];
+extern struct pci_fixup __end_pci_fixups_suspend[];
+extern struct pci_fixup __start_pci_fixups_suspend_late[];
+extern struct pci_fixup __end_pci_fixups_suspend_late[];
+
+static bool pci_apply_fixup_final_quirks;
+
+void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
+{
+	struct pci_fixup *start, *end;
+
+	switch (pass) {
+	case pci_fixup_early:
+		start = __start_pci_fixups_early;
+		end = __end_pci_fixups_early;
+		break;
+
+	case pci_fixup_header:
+		start = __start_pci_fixups_header;
+		end = __end_pci_fixups_header;
+		break;
+
+	case pci_fixup_final:
+		if (!pci_apply_fixup_final_quirks)
+			return;
+		start = __start_pci_fixups_final;
+		end = __end_pci_fixups_final;
+		break;
+
+	case pci_fixup_enable:
+		start = __start_pci_fixups_enable;
+		end = __end_pci_fixups_enable;
+		break;
+
+	case pci_fixup_resume:
+		start = __start_pci_fixups_resume;
+		end = __end_pci_fixups_resume;
+		break;
+
+	case pci_fixup_resume_early:
+		start = __start_pci_fixups_resume_early;
+		end = __end_pci_fixups_resume_early;
+		break;
+
+	case pci_fixup_suspend:
+		start = __start_pci_fixups_suspend;
+		end = __end_pci_fixups_suspend;
+		break;
+
+	case pci_fixup_suspend_late:
+		start = __start_pci_fixups_suspend_late;
+		end = __end_pci_fixups_suspend_late;
+		break;
+
+	default:
+		/* stupid compiler warning, you would think with an enum... */
+		return;
+	}
+	pci_do_fixups(dev, start, end);
+}
+EXPORT_SYMBOL(pci_fixup_device);
+
+static int __init pci_apply_final_quirks(void)
+{
+	struct pci_dev *dev = NULL;
+	u8 cls = 0;
+	u8 tmp;
+
+	if (pci_cache_line_size)
+		printk(KERN_DEBUG "PCI: CLS %u bytes\n",
+		       pci_cache_line_size << 2);
+
+	pci_apply_fixup_final_quirks = true;
+	for_each_pci_dev(dev) {
+		pci_fixup_device(pci_fixup_final, dev);
+		/*
+		 * If arch hasn't set it explicitly yet, use the CLS
+		 * value shared by all PCI devices.  If there's a
+		 * mismatch, fall back to the default value.
+		 */
+		if (!pci_cache_line_size) {
+			pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
+			if (!cls)
+				cls = tmp;
+			if (!tmp || cls == tmp)
+				continue;
+
+			printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
+			       cls << 2, tmp << 2,
+			       pci_dfl_cache_line_size << 2);
+			pci_cache_line_size = pci_dfl_cache_line_size;
+		}
+	}
+
+	if (!pci_cache_line_size) {
+		printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
+		       cls << 2, pci_dfl_cache_line_size << 2);
+		pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
+	}
+
+	return 0;
+}
+fs_initcall_sync(pci_apply_final_quirks);
+
 /*
  * Decoding should be disabled for a PCI device during BAR sizing to avoid
  * conflict. But doing so may cause problems on host bridge and perhaps other
@@ -43,9 +199,10 @@ static void quirk_mmio_always_on(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
 				PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
 
-/* The Mellanox Tavor device gives false positive parity errors
- * Mark this device with a broken_parity_status, to allow
- * PCI scanning code to "skip" this now blacklisted device.
+/*
+ * The Mellanox Tavor device gives false positive parity errors.  Mark this
+ * device with a broken_parity_status to allow PCI scanning code to "skip"
+ * this now blacklisted device.
  */
 static void quirk_mellanox_tavor(struct pci_dev *dev)
 {
@@ -54,15 +211,19 @@ static void quirk_mellanox_tavor(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
 
-/* Deal with broken BIOSes that neglect to enable passive release,
-   which can cause problems in combination with the 82441FX/PPro MTRRs */
+/*
+ * Deal with broken BIOSes that neglect to enable passive release,
+ * which can cause problems in combination with the 82441FX/PPro MTRRs
+ */
 static void quirk_passive_release(struct pci_dev *dev)
 {
 	struct pci_dev *d = NULL;
 	unsigned char dlc;
 
-	/* We have to make sure a particular bit is set in the PIIX3
-	   ISA bridge, so we have to go out and find it. */
+	/*
+	 * We have to make sure a particular bit is set in the PIIX3
+	 * ISA bridge, so we have to go out and find it.
+	 */
 	while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
 		pci_read_config_byte(d, 0x82, &dlc);
 		if (!(dlc & 1<<1)) {
@@ -75,13 +236,14 @@ static void quirk_passive_release(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82441,	quirk_passive_release);
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82441,	quirk_passive_release);
 
-/*  The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround
-    but VIA don't answer queries. If you happen to have good contacts at VIA
-    ask them for me please -- Alan
-
-    This appears to be BIOS not version dependent. So presumably there is a
-    chipset level fix */
-
+/*
+ * The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a
+ * workaround but VIA don't answer queries. If you happen to have good
+ * contacts at VIA ask them for me please -- Alan
+ *
+ * This appears to be BIOS not version dependent. So presumably there is a
+ * chipset level fix.
+ */
 static void quirk_isa_dma_hangs(struct pci_dev *dev)
 {
 	if (!isa_dma_bridge_buggy) {
@@ -89,10 +251,10 @@ static void quirk_isa_dma_hangs(struct pci_dev *dev)
 		pci_info(dev, "Activating ISA DMA hang workarounds\n");
 	}
 }
-	/*
-	 * Its not totally clear which chipsets are the problematic ones
-	 * We know 82C586 and 82C596 variants are affected.
-	 */
+/*
+ * It's not totally clear which chipsets are the problematic ones.  We know
+ * 82C586 and 82C596 variants are affected.
+ */
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C586_0,	quirk_isa_dma_hangs);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C596,	quirk_isa_dma_hangs);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82371SB_0,  quirk_isa_dma_hangs);
@@ -121,9 +283,7 @@ static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
 
-/*
- *	Chipsets where PCI->PCI transfers vanish or hang
- */
+/* Chipsets where PCI->PCI transfers vanish or hang */
 static void quirk_nopcipci(struct pci_dev *dev)
 {
 	if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
@@ -146,9 +306,7 @@ static void quirk_nopciamd(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8151_0,	quirk_nopciamd);
 
-/*
- *	Triton requires workarounds to be used by the drivers
- */
+/* Triton requires workarounds to be used by the drivers */
 static void quirk_triton(struct pci_dev *dev)
 {
 	if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
@@ -162,53 +320,62 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82439,	quirk_tr
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82439TX,	quirk_triton);
 
 /*
- *	VIA Apollo KT133 needs PCI latency patch
- *	Made according to a windows driver based patch by George E. Breese
- *	see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
- *	Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
- *	the info on which Mr Breese based his work.
+ * VIA Apollo KT133 needs PCI latency patch
+ * Made according to a Windows driver-based patch by George E. Breese;
+ * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
+ * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for the info on
+ * which Mr Breese based his work.
  *
- *	Updated based on further information from the site and also on
- *	information provided by VIA
+ * Updated based on further information from the site and also on
+ * information provided by VIA
  */
 static void quirk_vialatency(struct pci_dev *dev)
 {
 	struct pci_dev *p;
 	u8 busarb;
-	/* Ok we have a potential problem chipset here. Now see if we have
-	   a buggy southbridge */
 
+	/*
+	 * Ok, we have a potential problem chipset here. Now see if we have
+	 * a buggy southbridge.
+	 */
 	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
 	if (p != NULL) {
-		/* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
-		/* Check for buggy part revisions */
+
+		/*
+		 * 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A;
+		 * thanks Dan Hollis.
+		 * Check for buggy part revisions
+		 */
 		if (p->revision < 0x40 || p->revision > 0x42)
 			goto exit;
 	} else {
 		p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
 		if (p == NULL)	/* No problem parts */
 			goto exit;
+
 		/* Check for buggy part revisions */
 		if (p->revision < 0x10 || p->revision > 0x12)
 			goto exit;
 	}
 
 	/*
-	 *	Ok we have the problem. Now set the PCI master grant to
-	 *	occur every master grant. The apparent bug is that under high
-	 *	PCI load (quite common in Linux of course) you can get data
-	 *	loss when the CPU is held off the bus for 3 bus master requests
-	 *	This happens to include the IDE controllers....
+	 * Ok we have the problem. Now set the PCI master grant to occur
+	 * every master grant. The apparent bug is that under high PCI load
+	 * (quite common in Linux of course) you can get data loss when the
+	 * CPU is held off the bus for 3 bus master requests.  This happens
+	 * to include the IDE controllers....
 	 *
-	 *	VIA only apply this fix when an SB Live! is present but under
-	 *	both Linux and Windows this isn't enough, and we have seen
-	 *	corruption without SB Live! but with things like 3 UDMA IDE
-	 *	controllers. So we ignore that bit of the VIA recommendation..
+	 * VIA only apply this fix when an SB Live! is present but under
+	 * both Linux and Windows this isn't enough, and we have seen
+	 * corruption without SB Live! but with things like 3 UDMA IDE
+	 * controllers. So we ignore that bit of the VIA recommendation..
 	 */
-
 	pci_read_config_byte(dev, 0x76, &busarb);
-	/* Set bit 4 and bi 5 of byte 76 to 0x01
-	   "Master priority rotation on every PCI master grant */
+
+	/*
+	 * Set bit 4 and bit 5 of byte 76 to 0x01
+	 * "Master priority rotation on every PCI master grant"
+	 */
 	busarb &= ~(1<<5);
 	busarb |= (1<<4);
 	pci_write_config_byte(dev, 0x76, busarb);
@@ -224,9 +391,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8363_0,	quirk_vial
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8371_1,	quirk_vialatency);
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8361,		quirk_vialatency);
 
-/*
- *	VIA Apollo VP3 needs ETBF on BT848/878
- */
+/* VIA Apollo VP3 needs ETBF on BT848/878 */
 static void quirk_viaetbf(struct pci_dev *dev)
 {
 	if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
@@ -246,10 +411,9 @@ static void quirk_vsfx(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C576,	quirk_vsfx);
 
 /*
- *	Ali Magik requires workarounds to be used by the drivers
- *	that DMA to AGP space. Latency must be set to 0xA and triton
- *	workaround applied too
- *	[Info kindly provided by ALi]
+ * ALi Magik requires workarounds to be used by the drivers that DMA to AGP
+ * space. Latency must be set to 0xA and Triton workaround applied too.
+ * [Info kindly provided by ALi]
  */
 static void quirk_alimagik(struct pci_dev *dev)
 {
@@ -261,10 +425,7 @@ static void quirk_alimagik(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M1647,		quirk_alimagik);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M1651,		quirk_alimagik);
 
-/*
- *	Natoma has some interesting boundary conditions with Zoran stuff
- *	at least
- */
+/* Natoma has some interesting boundary conditions with Zoran stuff at least */
 static void quirk_natoma(struct pci_dev *dev)
 {
 	if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
@@ -280,8 +441,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443BX_1,	quir
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443BX_2,	quirk_natoma);
 
 /*
- *  This chip can cause PCI parity errors if config register 0xA0 is read
- *  while DMAs are occurring.
+ * This chip can cause PCI parity errors if config register 0xA0 is read
+ * while DMAs are occurring.
  */
 static void quirk_citrine(struct pci_dev *dev)
 {
@@ -321,8 +482,8 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
 
 /*
- *  S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
- *  If it's needed, re-allocate the region.
+ * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
+ * If it's needed, re-allocate the region.
  */
 static void quirk_s3_64M(struct pci_dev *dev)
 {
@@ -413,8 +574,8 @@ static void quirk_io_region(struct pci_dev *dev, int port,
 }
 
 /*
- *	ATI Northbridge setups MCE the processor if you even
- *	read somewhere between 0x3b0->0x3bb or read 0x3d3
+ * ATI Northbridge setups MCE the processor if you even read somewhere
+ * between 0x3b0->0x3bb or read 0x3d3
  */
 static void quirk_ati_exploding_mce(struct pci_dev *dev)
 {
@@ -429,6 +590,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI,	PCI_DEVICE_ID_ATI_RS100,   quirk_ati_
  * In the AMD NL platform, this device ([1022:7912]) has a class code of
  * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
  * claim it.
+ *
  * But the dwc3 driver is a more specific driver for this device, and we'd
  * prefer to use it instead of xhci. To prevent xhci from claiming the
  * device, change the class code to 0x0c03fe, which the PCI r3.0 spec
@@ -448,11 +610,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
 		quirk_amd_nl_class);
 
 /*
- * Let's make the southbridge information explicit instead
- * of having to worry about people probing the ACPI areas,
- * for example.. (Yes, it happens, and if you read the wrong
- * ACPI register it will put the machine to sleep with no
- * way of waking it up again. Bummer).
+ * Let's make the southbridge information explicit instead of having to
+ * worry about people probing the ACPI areas, for example.. (Yes, it
+ * happens, and if you read the wrong ACPI register it will put the machine
+ * to sleep with no way of waking it up again. Bummer).
  *
  * ALI M7101: Two IO regions pointed to by words at
  *	0xE0 (64 bytes of ACPI registers)
@@ -508,6 +669,7 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
 			break;
 		size = bit;
 	}
+
 	/*
 	 * For now we only print it out. Eventually we'll want to
 	 * reserve it, but let's get enough confirmation reports first.
@@ -579,8 +741,7 @@ static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
 	 * priority and can't tell whether the legacy device or the one created
 	 * here is really at that address.  This happens on boards with broken
 	 * BIOSes.
-	*/
-
+	 */
 	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
 	if (enable & ICH4_ACPI_EN)
 		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
@@ -617,7 +778,8 @@ static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
 				"ICH6 GPIO");
 }
 
-static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
+static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
+				    const char *name, int dynsize)
 {
 	u32 val;
 	u32 size, base;
@@ -641,7 +803,10 @@ static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const cha
 	}
 	base &= ~(size-1);
 
-	/* Just print it out for now. We should reserve it after more debugging */
+	/*
+	 * Just print it out for now. We should reserve it after more
+	 * debugging.
+	 */
 	pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
 }
 
@@ -657,7 +822,8 @@ static void quirk_ich6_lpc(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
 
-static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name)
+static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
+				    const char *name)
 {
 	u32 val;
 	u32 mask, base;
@@ -668,15 +834,15 @@ static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const cha
 	if (!(val & 1))
 		return;
 
-	/*
-	 * IO base in bits 15:2, mask in bits 23:18, both
-	 * are dword-based
-	 */
+	/* IO base in bits 15:2, mask in bits 23:18, both are dword-based */
 	base = val & 0xfffc;
 	mask = (val >> 16) & 0xfc;
 	mask |= 3;
 
-	/* Just print it out for now. We should reserve it after more debugging */
+	/*
+	 * Just print it out for now. We should reserve it after more
+	 * debugging.
+	 */
 	pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
 }
 
@@ -748,8 +914,8 @@ static void quirk_vt8235_acpi(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8235,	quirk_vt8235_acpi);
 
 /*
- * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back:
- *	Disable fast back-to-back on the secondary bus segment
+ * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast
+ * back-to-back: Disable fast back-to-back on the secondary bus segment
  */
 static void quirk_xio2000a(struct pci_dev *dev)
 {
@@ -774,8 +940,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
  * VIA 686A/B: If an IO-APIC is active, we need to route all on-chip
  * devices to the external APIC.
  *
- * TODO: When we have device-specific interrupt routers,
- * this code will go away from quirks.
+ * TODO: When we have device-specific interrupt routers, this code will go
+ * away from quirks.
  */
 static void quirk_via_ioapic(struct pci_dev *dev)
 {
@@ -816,13 +982,13 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237,		quirk_via_vt
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237,		quirk_via_vt8237_bypass_apic_deassert);
 
 /*
- * The AMD io apic can hang the box when an apic irq is masked.
+ * The AMD IO-APIC can hang the box when an APIC IRQ is masked.
  * We check all revs >= B0 (yet not in the pre production!) as the bug
  * is currently marked NoFix
  *
  * We have multiple reports of hangs with this chipset that went away with
  * noapic specified. For the moment we assume it's the erratum. We may be wrong
- * of course. However the advice is demonstrably good even if so..
+ * of course. However the advice is demonstrably good even if so.
  */
 static void quirk_amd_ioapic(struct pci_dev *dev)
 {
@@ -838,7 +1004,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_VIPER_7410,	quirk_a
 
 static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
 {
-	/* Fix for improper SRIOV configuration on Cavium cn88xx  RNM device */
+	/* Fix for improper SR-IOV configuration on Cavium cn88xx RNM device */
 	if (dev->subsystem_device == 0xa118)
 		dev->sriov->link = dev->devfn;
 }
@@ -860,19 +1026,17 @@ static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
 
 /*
- * FIXME: it is questionable that quirk_via_acpi
- * is needed.  It shows up as an ISA bridge, and does not
- * support the PCI_INTERRUPT_LINE register at all.  Therefore
- * it seems like setting the pci_dev's 'irq' to the
- * value of the ACPI SCI interrupt is only done for convenience.
+ * FIXME: it is questionable that quirk_via_acpi() is needed.  It shows up
+ * as an ISA bridge, and does not support the PCI_INTERRUPT_LINE register
+ * at all.  Therefore it seems like setting the pci_dev's IRQ to the value
+ * of the ACPI SCI interrupt is only done for convenience.
  *	-jgarzik
  */
 static void quirk_via_acpi(struct pci_dev *d)
 {
-	/*
-	 * VIA ACPI device: SCI IRQ line in PCI config byte 0x42
-	 */
 	u8 irq;
+
+	/* VIA ACPI device: SCI IRQ line in PCI config byte 0x42 */
 	pci_read_config_byte(d, 0x42, &irq);
 	irq &= 0xf;
 	if (irq && (irq != 2))
@@ -881,11 +1045,7 @@ static void quirk_via_acpi(struct pci_dev *d)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C586_3,	quirk_via_acpi);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686_4,	quirk_via_acpi);
 
-
-/*
- *	VIA bridges which have VLink
- */
-
+/* VIA bridges which have VLink */
 static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;
 
 static void quirk_via_bridge(struct pci_dev *dev)
@@ -893,9 +1053,11 @@ static void quirk_via_bridge(struct pci_dev *dev)
 	/* See what bridge we have and find the device ranges */
 	switch (dev->device) {
 	case PCI_DEVICE_ID_VIA_82C686:
-		/* The VT82C686 is special, it attaches to PCI and can have
-		   any device number. All its subdevices are functions of
-		   that single device. */
+		/*
+		 * The VT82C686 is special; it attaches to PCI and can have
+		 * any device number. All its subdevices are functions of
+		 * that single device.
+		 */
 		via_vlink_dev_lo = PCI_SLOT(dev->devfn);
 		via_vlink_dev_hi = PCI_SLOT(dev->devfn);
 		break;
@@ -923,19 +1085,17 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8235,		quirk_via_b
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237,		quirk_via_bridge);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237A,	quirk_via_bridge);
 
-/**
- *	quirk_via_vlink		-	VIA VLink IRQ number update
- *	@dev: PCI device
+/*
+ * quirk_via_vlink		-	VIA VLink IRQ number update
+ * @dev: PCI device
  *
- *	If the device we are dealing with is on a PIC IRQ we need to
- *	ensure that the IRQ line register which usually is not relevant
- *	for PCI cards, is actually written so that interrupts get sent
- *	to the right place.
- *	We only do this on systems where a VIA south bridge was detected,
- *	and only for VIA devices on the motherboard (see quirk_via_bridge
- *	above).
+ * If the device we are dealing with is on a PIC IRQ we need to ensure that
+ * the IRQ line register which usually is not relevant for PCI cards, is
+ * actually written so that interrupts get sent to the right place.
+ *
+ * We only do this on systems where a VIA south bridge was detected, and
+ * only for VIA devices on the motherboard (see quirk_via_bridge above).
  */
-
 static void quirk_via_vlink(struct pci_dev *dev)
 {
 	u8 irq, new_irq;
@@ -955,9 +1115,10 @@ static void quirk_via_vlink(struct pci_dev *dev)
 	    PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
 		return;
 
-	/* This is an internal VLink device on a PIC interrupt. The BIOS
-	   ought to have set this but may not have, so we redo it */
-
+	/*
+	 * This is an internal VLink device on a PIC interrupt. The BIOS
+	 * ought to have set this but may not have, so we redo it.
+	 */
 	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
 	if (new_irq != irq) {
 		pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n",
@@ -969,10 +1130,9 @@ static void quirk_via_vlink(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
 
 /*
- * VIA VT82C598 has its device ID settable and many BIOSes
- * set it to the ID of VT82C597 for backward compatibility.
- * We need to switch it off to be able to recognize the real
- * type of the chip.
+ * VIA VT82C598 has its device ID settable and many BIOSes set it to the ID
+ * of VT82C597 for backward compatibility.  We need to switch it off to be
+ * able to recognize the real type of the chip.
  */
 static void quirk_vt82c598_id(struct pci_dev *dev)
 {
@@ -982,10 +1142,10 @@ static void quirk_vt82c598_id(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C597_0,	quirk_vt82c598_id);
 
 /*
- * CardBus controllers have a legacy base address that enables them
- * to respond as i82365 pcmcia controllers.  We don't want them to
- * do this even if the Linux CardBus driver is not loaded, because
- * the Linux i82365 driver does not (and should not) handle CardBus.
+ * CardBus controllers have a legacy base address that enables them to
+ * respond as i82365 pcmcia controllers.  We don't want them to do this
+ * even if the Linux CardBus driver is not loaded, because the Linux i82365
+ * driver does not (and should not) handle CardBus.
  */
 static void quirk_cardbus_legacy(struct pci_dev *dev)
 {
@@ -997,11 +1157,11 @@ DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
 			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
 
 /*
- * Following the PCI ordering rules is optional on the AMD762. I'm not
- * sure what the designers were smoking but let's not inhale...
+ * Following the PCI ordering rules is optional on the AMD762. I'm not sure
+ * what the designers were smoking but let's not inhale...
  *
- * To be fair to AMD, it follows the spec by default, its BIOS people
- * who turn it off!
+ * To be fair to AMD, it follows the spec by default, it's BIOS people who
+ * turn it off!
  */
 static void quirk_amd_ordering(struct pci_dev *dev)
 {
@@ -1020,11 +1180,11 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
 
 /*
- *	DreamWorks provided workaround for Dunord I-3000 problem
+ * DreamWorks-provided workaround for Dunord I-3000 problem
  *
- *	This card decodes and responds to addresses not apparently
- *	assigned to it. We force a larger allocation to ensure that
- *	nothing gets put too close to it.
+ * This card decodes and responds to addresses not apparently assigned to
+ * it.  We force a larger allocation to ensure that nothing gets put too
+ * close to it.
  */
 static void quirk_dunord(struct pci_dev *dev)
 {
@@ -1037,10 +1197,9 @@ static void quirk_dunord(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD,	PCI_DEVICE_ID_DUNORD_I3000,	quirk_dunord);
 
 /*
- * i82380FB mobile docking controller: its PCI-to-PCI bridge
- * is subtractive decoding (transparent), and does indicate this
- * in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80
- * instead of 0x01.
+ * i82380FB mobile docking controller: its PCI-to-PCI bridge is subtractive
+ * decoding (transparent), and does indicate this in the ProgIf.
+ * Unfortunately, the ProgIf value is wrong - 0x80 instead of 0x01.
  */
 static void quirk_transparent_bridge(struct pci_dev *dev)
 {
@@ -1050,10 +1209,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82380FB,	quirk
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA,	0x605,	quirk_transparent_bridge);
 
 /*
- * Common misconfiguration of the MediaGX/Geode PCI master that will
- * reduce PCI bandwidth from 70MB/s to 25MB/s.  See the GXM/GXLV/GX1
- * datasheets found at http://www.national.com/analog for info on what
- * these bits do.  <christer@weinigel.se>
+ * Common misconfiguration of the MediaGX/Geode PCI master that will reduce
+ * PCI bandwidth from 70MB/s to 25MB/s.  See the GXM/GXLV/GX1 datasheets
+ * found at http://www.national.com/analog for info on what these bits do.
+ * <christer@weinigel.se>
  */
 static void quirk_mediagx_master(struct pci_dev *dev)
 {
@@ -1071,9 +1230,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX,	PCI_DEVICE_ID_CYRIX_PCI_MASTER, qui
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX,	PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
 
 /*
- *	Ensure C0 rev restreaming is off. This is normally done by
- *	the BIOS but in the odd case it is not the results are corruption
- *	hence the presence of a Linux check
+ * Ensure C0 rev restreaming is off. This is normally done by the BIOS but
+ * in the odd case it is not the results are corruption hence the presence
+ * of a Linux check.
  */
 static void quirk_disable_pxb(struct pci_dev *pdev)
 {
@@ -1117,9 +1276,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
 
-/*
- *	Serverworks CSB5 IDE does not fully support native mode
- */
+/* Serverworks CSB5 IDE does not fully support native mode */
 static void quirk_svwks_csb5ide(struct pci_dev *pdev)
 {
 	u8 prog;
@@ -1133,9 +1290,7 @@ static void quirk_svwks_csb5ide(struct pci_dev *pdev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
 
-/*
- *	Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
- */
+/* Intel 82801CAM ICH3-M datasheet says IDE modes must be the same */
 static void quirk_ide_samemode(struct pci_dev *pdev)
 {
 	u8 prog;
@@ -1151,10 +1306,7 @@ static void quirk_ide_samemode(struct pci_dev *pdev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
 
-/*
- * Some ATA devices break if put into D3
- */
-
+/* Some ATA devices break if put into D3 */
 static void quirk_no_ata_d3(struct pci_dev *pdev)
 {
 	pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
@@ -1172,7 +1324,8 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
 				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
 
-/* This was originally an Alpha specific thing, but it really fits here.
+/*
+ * This was originally an Alpha-specific thing, but it really fits here.
  * The i82375 PCI/EISA bridge appears as non-classified. Fix that.
  */
 static void quirk_eisa_bridge(struct pci_dev *dev)
@@ -1181,7 +1334,6 @@ static void quirk_eisa_bridge(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82375,	quirk_eisa_bridge);
 
-
 /*
  * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
  * is not activated. The myth is that Asus said that they do not want the
@@ -1398,15 +1550,19 @@ static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
 
 	if (likely(!asus_hides_smbus || !asus_rcba_base))
 		return;
+
 	/* read the Function Disable register, dword mode only */
 	val = readl(asus_rcba_base + 0x3418);
-	writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */
+
+	/* enable the SMBus device */
+	writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418);
 }
 
 static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
 {
 	if (likely(!asus_hides_smbus || !asus_rcba_base))
 		return;
+
 	iounmap(asus_rcba_base);
 	asus_rcba_base = NULL;
 	pci_info(dev, "Enabled ICH6/i801 SMBus device\n");
@@ -1423,9 +1579,7 @@ DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1,	asus_
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1,	asus_hides_smbus_lpc_ich6_resume);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1,	asus_hides_smbus_lpc_ich6_resume_early);
 
-/*
- * SiS 96x south bridge: BIOS typically hides SMBus device...
- */
+/* SiS 96x south bridge: BIOS typically hides SMBus device... */
 static void quirk_sis_96x_smbus(struct pci_dev *dev)
 {
 	u8 val = 0;
@@ -1448,7 +1602,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_LPC,		quirk_si
  * ... This is further complicated by the fact that some SiS96x south
  * bridges pretend to be 85C503/5513 instead.  In that case see if we
  * spotted a compatible north bridge to make sure.
- * (pci_find_device doesn't work yet)
+ * (pci_find_device() doesn't work yet)
  *
  * We can also enable the sis96x bit in the discovery register..
  */
@@ -1468,9 +1622,9 @@ static void quirk_sis_503(struct pci_dev *dev)
 	}
 
 	/*
-	 * Ok, it now shows up as a 96x.. run the 96x quirk by
-	 * hand in case it has already been processed.
-	 * (depends on link order, which is apparently not guaranteed)
+	 * Ok, it now shows up as a 96x.  Run the 96x quirk by hand in case
+	 * it has already been processed.  (Depends on link order, which is
+	 * apparently not guaranteed)
 	 */
 	dev->device = devid;
 	quirk_sis_96x_smbus(dev);
@@ -1478,7 +1632,6 @@ static void quirk_sis_503(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_503,		quirk_sis_503);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_503,		quirk_sis_503);
 
-
 /*
  * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller
  * and MC97 modem controller are disabled when a second PCI soundcard is
@@ -1515,9 +1668,8 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237, asus_h
 #if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
 
 /*
- *	If we are using libata we can drive this chip properly but must
- *	do this early on to make the additional device appear during
- *	the PCI scanning.
+ * If we are using libata we can drive this chip properly but must do this
+ * early on to make the additional device appear during the PCI scanning.
  */
 static void quirk_jmicron_ata(struct pci_dev *pdev)
 {
@@ -1613,14 +1765,18 @@ static void quirk_alder_ioapic(struct pci_dev *pdev)
 	if ((pdev->class >> 8) != 0xff00)
 		return;
 
-	/* the first BAR is the location of the IO APIC...we must
+	/*
+	 * The first BAR is the location of the IO-APIC... we must
 	 * not touch this (and it's already covered by the fixmap), so
-	 * forcibly insert it into the resource tree */
+	 * forcibly insert it into the resource tree.
+	 */
 	if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
 		insert_resource(&iomem_resource, &pdev->resource[0]);
 
-	/* The next five BARs all seem to be rubbish, so just clean
-	 * them out */
+	/*
+	 * The next five BARs all seem to be rubbish, so just clean
+	 * them out.
+	 */
 	for (i = 1; i < 6; i++)
 		memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
 }
@@ -1638,8 +1794,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7525_MCH,	quir
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
 
 /*
- * It's possible for the MSI to get corrupted if shpc and acpi
- * are used together on certain PXH-based systems.
+ * It's possible for the MSI to get corrupted if SHPC and ACPI are used
+ * together on certain PXH-based systems.
  */
 static void quirk_pcie_pxh(struct pci_dev *dev)
 {
@@ -1653,15 +1809,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_1,	quirk_pc
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHV,	quirk_pcie_pxh);
 
 /*
- * Some Intel PCI Express chipsets have trouble with downstream
- * device power management.
+ * Some Intel PCI Express chipsets have trouble with downstream device
+ * power management.
  */
 static void quirk_intel_pcie_pm(struct pci_dev *dev)
 {
 	pci_pm_d3_delay = 120;
 	dev->no_d1d2 = 1;
 }
-
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e2, quirk_intel_pcie_pm);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e3, quirk_intel_pcie_pm);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e4, quirk_intel_pcie_pm);
@@ -1723,7 +1878,7 @@ static const struct dmi_system_id boot_interrupt_dmi_table[] = {
 
 /*
  * Boot interrupts on some chipsets cannot be turned off. For these chipsets,
- * remap the original interrupt in the linux kernel to the boot interrupt, so
+ * remap the original interrupt in the Linux kernel to the boot interrupt, so
  * that a PCI device's interrupt handler is installed on the boot interrupt
  * line instead.
  */
@@ -1760,7 +1915,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80332_1,	quirk
  */
 
 /*
- * IO-APIC1 on 6300ESB generates boot interrupts, see intel order no
+ * IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no.
  * 300641-004US, section 5.7.3.
  */
 #define INTEL_6300_IOAPIC_ABAR		0x40
@@ -1783,9 +1938,7 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_ESB_10,	quirk_disable_intel_boot_interrupt);
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_ESB_10,	quirk_disable_intel_boot_interrupt);
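The body of quirk_disable_intel_boot_interrupt() is elided above. A hedged sketch of the pattern it follows, setting a disable bit through the ABAR register defined earlier (the bit position and names are assumptions, not taken from this diff):

#define EXAMPLE_6300_DISABLE_BOOT_IRQ	(1 << 14)	/* assumed bit position */

static void example_disable_intel_boot_interrupt(struct pci_dev *dev)
{
	u16 word;

	pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &word);
	word |= EXAMPLE_6300_DISABLE_BOOT_IRQ;
	pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, word);

	pci_info(dev, "disabled boot interrupt on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}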
 
-/*
- * disable boot interrupts on HT-1000
- */
+/* Disable boot interrupts on HT-1000 */
 #define BC_HT1000_FEATURE_REG		0x64
 #define BC_HT1000_PIC_REGS_ENABLE	(1<<0)
 #define BC_HT1000_MAP_IDX		0xC00
@@ -1816,9 +1969,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS,   PCI_DEVICE_ID_SERVERWORKS_HT1000SB,	quirk_disable_broadcom_boot_interrupt);
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS,   PCI_DEVICE_ID_SERVERWORKS_HT1000SB,	quirk_disable_broadcom_boot_interrupt);
 
-/*
- * disable boot interrupts on AMD and ATI chipsets
- */
+/* Disable boot interrupts on AMD and ATI chipsets */
+
 /*
  * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131
  * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode
@@ -1894,7 +2046,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
 			 quirk_tc86c001_ide);
 
 /*
- * PLX PCI 9050 PCI Target bridge controller has an errata that prevents the
+ * PLX PCI 9050 PCI Target bridge controller has an erratum that prevents the
 * local configuration registers, accessible via BAR0 (memory) or BAR1 (i/o),
 * from being read correctly if bit 7 of the base address is set.
  * The BAR0 or BAR1 region may be disabled (size 0) or enabled (size 128).
@@ -2087,15 +2239,17 @@ static void quirk_p64h2_1k_io(struct pci_dev *dev)
 		dev->io_window_1k = 1;
 	}
 }
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	0x1460,		quirk_p64h2_1k_io);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
 
-/* Under some circumstances, AER is not linked with extended capabilities.
+/*
+ * Under some circumstances, AER is not linked with extended capabilities.
  * Force it to be linked by setting the corresponding control bit in the
  * config space.
  */
 static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
 {
 	uint8_t b;
+
 	if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
 		if (!(b & 0x20)) {
 			pci_write_config_byte(dev, 0xf41, b | 0x20);
@@ -2125,8 +2279,10 @@ static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
 		PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
 	uint8_t b;
 
-	/* p should contain the first (internal) VT6212L -- see if we have
-	   an external one by searching again */
+	/*
+	 * p should contain the first (internal) VT6212L -- see if we have
+	 * an external one by searching again.
+	 */
 	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
 	if (!p)
 		return;
@@ -2171,7 +2327,6 @@ static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
 			pcie_set_readrq(dev, 2048);
 	}
 }
-
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
 			 PCI_DEVICE_ID_TIGON3_5719,
 			 quirk_brcm_5719_limit_mrrs);
@@ -2179,14 +2334,16 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
 #ifdef CONFIG_PCIE_IPROC_PLATFORM
 static void quirk_paxc_bridge(struct pci_dev *pdev)
 {
-	/* The PCI config space is shared with the PAXC root port and the first
+	/*
+	 * The PCI config space is shared with the PAXC root port and the first
 	 * Ethernet device.  So, we need to workaround this by telling the PCI
 	 * code that the bridge is not an Ethernet device.
 	 */
 	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
 		pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
 
-	/* MPSS is not being set properly (as it is currently 0).  This is
+	/*
+	 * MPSS is not being set properly (as it is currently 0).  This is
 	 * because that area of the PCI config space is hard coded to zero, and
 	 * is not modifiable by firmware.  Set this to 2 (e.g., 512 byte MPS)
 	 * so that the MPS can be set to the real max value.
@@ -2197,10 +2354,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
 #endif
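As a side note on the MPSS comment above: pcie_mpss holds the Max_Payload_Size_Supported encoding from the PCIe Device Capabilities register, i.e. an exponent over 128 bytes, so the value 2 advertises 512-byte payloads. A trivial sketch of the decoding (the helper name is hypothetical):

static unsigned int example_mpss_to_bytes(u8 pcie_mpss)
{
	/* 0 -> 128, 1 -> 256, 2 -> 512, ... */
	return 128U << pcie_mpss;
}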
 
-/* Originally in EDAC sources for i82875P:
- * Intel tells BIOS developers to hide device 6 which
- * configures the overflow device access containing
- * the DRBs - this is where we expose device 6.
+/*
+ * Originally in EDAC sources for i82875P: Intel tells BIOS developers to
+ * hide device 6 which configures the overflow device access containing the
+ * DRBs - this is where we expose device 6.
  * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
  */
 static void quirk_unhide_mch_dev6(struct pci_dev *dev)
@@ -2212,18 +2369,18 @@ static void quirk_unhide_mch_dev6(struct pci_dev *dev)
 		pci_write_config_byte(dev, 0xF4, reg | 0x02);
 	}
 }
-
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
 			quirk_unhide_mch_dev6);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
 			quirk_unhide_mch_dev6);
 
 #ifdef CONFIG_PCI_MSI
-/* Some chipsets do not support MSI. We cannot easily rely on setting
- * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
- * some other buses controlled by the chipset even if Linux is not
- * aware of it.  Instead of setting the flag on all buses in the
- * machine, simply disable MSI globally.
+/*
+ * Some chipsets do not support MSI. We cannot easily rely on setting
+ * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually some
+ * other buses controlled by the chipset even if Linux is not aware of it.
+ * Instead of setting the flag on all buses in the machine, simply disable
+ * MSI globally.
  */
 static void quirk_disable_all_msi(struct pci_dev *dev)
 {
@@ -2271,8 +2428,10 @@ static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
 
-/* Go through the list of Hypertransport capabilities and
- * return 1 if a HT MSI capability is found and enabled */
+/*
+ * Go through the list of HyperTransport capabilities and return 1 if a HT
+ * MSI capability is found and enabled.
+ */
 static int msi_ht_cap_enabled(struct pci_dev *dev)
 {
 	int pos, ttl = PCI_FIND_CAP_TTL;
@@ -2295,7 +2454,7 @@ static int msi_ht_cap_enabled(struct pci_dev *dev)
 	return 0;
 }
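The middle of msi_ht_cap_enabled() is elided by the hunk above; roughly, it walks every HT MSI mapping capability and reports whether any of them has the enable bit set. A hedged sketch of that walk (not the exact upstream body):

static int example_ht_msi_enabled(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		/* HT_MSI_FLAGS/HT_MSI_FLAGS_ENABLE come from <linux/pci.h> */
		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0 &&
		    (flags & HT_MSI_FLAGS_ENABLE))
			return 1;

		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
	return 0;
}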
 
-/* Check the hypertransport MSI mapping to know whether MSI is enabled or not */
+/* Check the HyperTransport MSI mapping to know whether MSI is enabled or not */
 static void quirk_msi_ht_cap(struct pci_dev *dev)
 {
 	if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
@@ -2306,8 +2465,9 @@ static void quirk_msi_ht_cap(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
 			quirk_msi_ht_cap);
 
-/* The nVidia CK804 chipset may have 2 HT MSI mappings.
- * MSI are supported if the MSI capability set in any of these mappings.
+/*
+ * The nVidia CK804 chipset may have 2 HT MSI mappings.  MSI is supported
+ * if the MSI capability is set in any of these mappings.
  */
 static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
 {
@@ -2316,8 +2476,9 @@ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
 	if (!dev->subordinate)
 		return;
 
-	/* check HT MSI cap on this chipset and the root one.
-	 * a single one having MSI is enough to be sure that MSI are supported.
+	/*
+	 * Check HT MSI cap on this chipset and the root one.  A single one
+	 * having MSI is enough to be sure that MSI is supported.
 	 */
 	pdev = pci_get_slot(dev->bus, 0);
 	if (!pdev)
@@ -2354,13 +2515,13 @@ static void ht_enable_msi_mapping(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
 			 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
 			 ht_enable_msi_mapping);
-
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
 			 ht_enable_msi_mapping);
 
-/* The P5N32-SLI motherboards from Asus have a problem with msi
- * for the MCP55 NIC. It is not yet determined whether the msi problem
- * also affects other devices. As for now, turn off msi for this device.
+/*
+ * The P5N32-SLI motherboards from Asus have a problem with MSI
+ * for the MCP55 NIC. It is not yet determined whether the MSI problem
+ * also affects other devices. For now, turn off MSI for this device.
  */
 static void nvenet_msi_disable(struct pci_dev *dev)
 {
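The body is elided here; quirks of this kind are usually gated on the DMI board name so that only the affected motherboard pays the cost. A hedged sketch of that pattern (the board string and the use of dev->no_msi are assumptions; requires <linux/dmi.h>):

static void example_nvenet_msi_disable(struct pci_dev *dev)
{
	const char *board = dmi_get_system_info(DMI_BOARD_NAME);

	/* Board string is illustrative, taken from the comment above */
	if (board && strstr(board, "P5N32-SLI")) {
		pci_info(dev, "disabling MSI for MCP55 NIC on P5N32-SLI\n");
		dev->no_msi = 1;
	}
}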
@@ -2397,16 +2558,14 @@ static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
 	pci_read_config_dword(dev, 0x74, &cfg);
 
 	if (cfg & ((1 << 2) | (1 << 15))) {
-		printk(KERN_INFO "Rewriting irq routing register on MCP55\n");
+		printk(KERN_INFO "Rewriting IRQ routing register on MCP55\n");
 		cfg &= ~((1 << 2) | (1 << 15));
 		pci_write_config_dword(dev, 0x74, cfg);
 	}
 }
-
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
 			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
 			nvbridge_check_legacy_irq_routing);
-
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
 			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
 			nvbridge_check_legacy_irq_routing);
@@ -2416,7 +2575,7 @@ static int ht_check_msi_mapping(struct pci_dev *dev)
 	int pos, ttl = PCI_FIND_CAP_TTL;
 	int found = 0;
 
-	/* check if there is HT MSI cap or enabled on this device */
+	/* Check if there is HT MSI cap or enabled on this device */
 	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
 	while (pos && ttl--) {
 		u8 flags;
@@ -2452,7 +2611,7 @@ static int host_bridge_with_leaf(struct pci_dev *host_bridge)
 		if (!dev)
 			continue;
 
-		/* found next host bridge ?*/
+		/* found next host bridge? */
 		pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
 		if (pos != 0) {
 			pci_dev_put(dev);
@@ -2611,27 +2770,27 @@ static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
 {
 	return __nv_msi_ht_cap_quirk(dev, 1);
 }
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
 
 static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
 {
 	return __nv_msi_ht_cap_quirk(dev, 0);
 }
-
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
 
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
-
 static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
 {
 	dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
 }
+
 static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
 {
 	struct pci_dev *p;
 
-	/* SB700 MSI issue will be fixed at HW level from revision A21,
+	/*
+	 * SB700 MSI issue will be fixed at HW level from revision A21;
 	 * we need to check PCI REVISION ID of SMBus controller to get SB700
 	 * revision.
 	 */
@@ -2644,6 +2803,7 @@ static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
 		dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
 	pci_dev_put(p);
 }
+
 static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
 {
 	/* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */
@@ -2713,55 +2873,56 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
 			quirk_msi_intx_disable_qca_bug);
 #endif /* CONFIG_PCI_MSI */
 
-/* Allow manual resource allocation for PCI hotplug bridges
- * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
- * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6),
- * kernel fails to allocate resources when hotplug device is
- * inserted and PCI bus is rescanned.
+/*
+ * Allow manual resource allocation for PCI hotplug bridges via
+ * pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For some PCI-PCI
+ * hotplug bridges, like PLX 6254 (former HINT HB6), kernel fails to
+ * allocate resources when hotplug device is inserted and PCI bus is
+ * rescanned.
  */
 static void quirk_hotplug_bridge(struct pci_dev *dev)
 {
 	dev->is_hotplug_bridge = 1;
 }
-
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
 
 /*
- * This is a quirk for the Ricoh MMC controller found as a part of
- * some mulifunction chips.
-
+ * This is a quirk for the Ricoh MMC controller found as a part of some
+ * multifunction chips.
+ *
 * This is very similar to and based on the ricoh_mmc driver written by
  * Philip Langdale. Thank you for these magic sequences.
  *
- * These chips implement the four main memory card controllers (SD, MMC, MS, xD)
- * and one or both of cardbus or firewire.
+ * These chips implement the four main memory card controllers (SD, MMC,
+ * MS, xD) and one or both of CardBus or FireWire.
  *
- * It happens that they implement SD and MMC
- * support as separate controllers (and PCI functions). The linux SDHCI
- * driver supports MMC cards but the chip detects MMC cards in hardware
- * and directs them to the MMC controller - so the SDHCI driver never sees
- * them.
+ * It happens that they implement SD and MMC support as separate
+ * controllers (and PCI functions). The Linux SDHCI driver supports MMC
+ * cards but the chip detects MMC cards in hardware and directs them to the
+ * MMC controller - so the SDHCI driver never sees them.
  *
- * To get around this, we must disable the useless MMC controller.
- * At that point, the SDHCI controller will start seeing them
- * It seems to be the case that the relevant PCI registers to deactivate the
- * MMC controller live on PCI function 0, which might be the cardbus controller
- * or the firewire controller, depending on the particular chip in question
+ * To get around this, we must disable the useless MMC controller.  At that
+ * point, the SDHCI controller will start seeing them.  It seems to be the
+ * case that the relevant PCI registers to deactivate the MMC controller
+ * live on PCI function 0, which might be the CardBus controller or the
+ * FireWire controller, depending on the particular chip in question.
  *
  * This has to be done early, because as soon as we disable the MMC controller
- * other pci functions shift up one level, e.g. function #2 becomes function
- * #1, and this will confuse the pci core.
+ * other PCI functions shift up one level, e.g. function #2 becomes function
+ * #1, and this will confuse the PCI core.
  */
-
 #ifdef CONFIG_MMC_RICOH_MMC
 static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
 {
-	/* disable via cardbus interface */
 	u8 write_enable;
 	u8 write_target;
 	u8 disable;
 
-	/* disable must be done via function #0 */
+	/*
+	 * Disable via CardBus interface
+	 *
+	 * This must be done via function #0
+	 */
 	if (PCI_FUNC(dev->devfn))
 		return;
 
@@ -2777,7 +2938,7 @@ static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
 	pci_write_config_byte(dev, 0x8E, write_enable);
 	pci_write_config_byte(dev, 0x8D, write_target);
 
-	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n");
+	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via CardBus function)\n");
 	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
@@ -2785,17 +2946,20 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
 
 static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
 {
-	/* disable via firewire interface */
 	u8 write_enable;
 	u8 disable;
 
-	/* disable must be done via function #0 */
+	/*
+	 * Disable via FireWire interface
+	 *
+	 * This must be done via function #0
+	 */
 	if (PCI_FUNC(dev->devfn))
 		return;
 	/*
 	 * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
-	 * certain types of SD/MMC cards. Lowering the SD base
-	 * clock frequency from 200Mhz to 50Mhz fixes this issue.
+	 * certain types of SD/MMC cards. Lowering the SD base clock
+	 * frequency from 200MHz to 50MHz fixes this issue.
 	 *
 	 * 0x150 - SD2.0 mode enable for changing base clock
 	 *	   frequency to 50MHz
@@ -2826,7 +2990,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
 	pci_write_config_byte(dev, 0xCB, disable | 0x02);
 	pci_write_config_byte(dev, 0xCA, write_enable);
 
-	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
+	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via FireWire function)\n");
 	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
 
 }
@@ -2842,13 +3006,13 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823,
 #define VTUNCERRMSK_REG	0x1ac
 #define VTD_MSK_SPEC_ERRORS	(1 << 31)
 /*
- * This is a quirk for masking vt-d spec defined errors to platform error
- * handling logic. With out this, platforms using Intel 7500, 5500 chipsets
+ * This is a quirk for masking VT-d spec-defined errors to platform error
+ * handling logic. Without this, platforms using Intel 7500, 5500 chipsets
  * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based
- * on the RAS config settings of the platform) when a vt-d fault happens.
+ * on the RAS config settings of the platform) when a VT-d fault happens.
  * The resulting SMI caused the system to hang.
  *
- * VT-d spec related errors are already handled by the VT-d OS code, so no
+ * VT-d spec-related errors are already handled by the VT-d OS code, so no
  * need to report the same error through other channels.
  */
 static void vtd_mask_spec_errors(struct pci_dev *dev)
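The body of vtd_mask_spec_errors() is elided above; as a sketch, it only needs to OR the mask bit into the register defined a few lines up (helper name hypothetical):

static void example_vtd_mask_spec_errors(struct pci_dev *dev)
{
	u32 word;

	pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
	pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
}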
@@ -2874,7 +3038,8 @@ static void fixup_ti816x_class(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
 			      PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
 
-/* Some PCIe devices do not work reliably with the claimed maximum
+/*
+ * Some PCIe devices do not work reliably with the claimed maximum
  * payload size supported.
  */
 static void fixup_mpss_256(struct pci_dev *dev)
@@ -2888,9 +3053,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
 			 PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
 
-/* Intel 5000 and 5100 Memory controllers have an errata with read completion
+/*
+ * Intel 5000 and 5100 Memory controllers have an erratum with read completion
  * coalescing (which is enabled by default on some BIOSes) and MPS of 256B.
- * Since there is no way of knowing what the PCIE MPS on each fabric will be
+ * Since there is no way of knowing what the PCIe MPS on each fabric will be
  * until all of the devices are discovered and buses walked, read completion
  * coalescing must be disabled.  Unfortunately, it cannot be re-enabled because
  * it is possible to hotplug a device with MPS of 256B.
@@ -2904,9 +3070,10 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
 	    pcie_bus_config == PCIE_BUS_DEFAULT)
 		return;
 
-	/* Intel errata specifies bits to change but does not say what they are.
-	 * Keeping them magical until such time as the registers and values can
-	 * be explained.
+	/*
+	 * Intel erratum specifies bits to change but does not say what
+	 * they are.  Keeping them magical until such time as the registers
+	 * and values can be explained.
 	 */
 	err = pci_read_config_word(dev, 0x48, &rcc);
 	if (err) {
@@ -2925,7 +3092,7 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
 		return;
 	}
 
-	pr_info_once("Read completion coalescing disabled due to hardware errata relating to 256B MPS\n");
+	pr_info_once("Read completion coalescing disabled due to hardware erratum relating to 256B MPS\n");
 }
 /* Intel 5000 series memory controllers and ports 2-7 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
@@ -2955,11 +3122,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
 
-
 /*
- * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum.  To
- * work around this, query the size it should be configured to by the device and
- * modify the resource end to correspond to this new size.
+ * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum.
+ * To work around this, query the size it should be configured to by the
+ * device and modify the resource end to correspond to this new size.
  */
 static void quirk_intel_ntb(struct pci_dev *dev)
 {
@@ -2981,39 +3147,17 @@ static void quirk_intel_ntb(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
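The body of quirk_intel_ntb() is elided by the diff. A hedged sketch of the approach described in the comment, reading a log2 BAR size from config space and trimming the resource to match (the config offset and BAR index are illustrative, not taken from this diff):

static void example_intel_ntb(struct pci_dev *dev)
{
	u8 val;

	if (pci_read_config_byte(dev, 0xd0, &val))	/* assumed offset */
		return;

	/* Resize the (assumed) BAR 2 resource to the size the device reports */
	dev->resource[2].end = dev->resource[2].start + ((u64)1 << val) - 1;
}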
 
-static ktime_t fixup_debug_start(struct pci_dev *dev,
-				 void (*fn)(struct pci_dev *dev))
-{
-	if (initcall_debug)
-		pci_info(dev, "calling  %pF @ %i\n", fn, task_pid_nr(current));
-
-	return ktime_get();
-}
-
-static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
-			       void (*fn)(struct pci_dev *dev))
-{
-	ktime_t delta, rettime;
-	unsigned long long duration;
-
-	rettime = ktime_get();
-	delta = ktime_sub(rettime, calltime);
-	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
-	if (initcall_debug || duration > 10000)
-		pci_info(dev, "%pF took %lld usecs\n", fn, duration);
-}
-
 /*
- * Some BIOS implementations leave the Intel GPU interrupts enabled,
- * even though no one is handling them (f.e. i915 driver is never loaded).
- * Additionally the interrupt destination is not set up properly
+ * Some BIOS implementations leave the Intel GPU interrupts enabled, even
+ * though no one is handling them (e.g., if the i915 driver is never
+ * loaded).  Additionally the interrupt destination is not set up properly
  * and the interrupt ends up -somewhere-.
  *
- * These spurious interrupts are "sticky" and the kernel disables
- * the (shared) interrupt line after 100.000+ generated interrupts.
+ * These spurious interrupts are "sticky" and the kernel disables the
+ * (shared) interrupt line after 100,000+ generated interrupts.
  *
- * Fix it by disabling the still enabled interrupts.
- * This resolves crashes often seen on monitor unplug.
+ * Fix it by disabling the still enabled interrupts.  This resolves crashes
+ * often seen on monitor unplug.
  */
 #define I915_DEIER_REG 0x4400c
 static void disable_igfx_irq(struct pci_dev *dev)
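The body of disable_igfx_irq() is elided; a hedged sketch of the MMIO pattern it uses, mapping BAR 0 and clearing the display engine interrupt enable register defined above (treat as illustrative, not the exact upstream code):

static void example_disable_igfx_irq(struct pci_dev *dev)
{
	void __iomem *regs = pci_iomap(dev, 0, 0);

	if (!regs) {
		pci_warn(dev, "igfx quirk: cannot iomap device\n");
		return;
	}

	/* Only touch the register if the BIOS really left interrupts enabled */
	if (readl(regs + I915_DEIER_REG) != 0) {
		pci_warn(dev, "BIOS left Intel GPU interrupts enabled; disabling\n");
		writel(0, regs + I915_DEIER_REG);
	}

	pci_iounmap(dev, regs);
}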
@@ -3101,38 +3245,22 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
  * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking,
  * DisINTx can be set but the interrupt status bit is non-functional.
  */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
-			quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2,
-			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, quirk_broken_intx_masking);
 
 static u16 mellanox_broken_intx_devs[] = {
 	PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
@@ -3177,7 +3305,8 @@ static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
 		}
 	}
 
-	/* Getting here means Connect-IB cards and up. Connect-IB has no INTx
+	/*
+	 * Getting here means Connect-IB cards and up. Connect-IB has no INTx
 	 * support so shouldn't be checked further
 	 */
 	if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
@@ -3297,8 +3426,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
  * shutdown before suspend. Otherwise the native host interface (NHI) will not
  * be present after resume if a device was plugged in before suspend.
  *
- * The thunderbolt controller consists of a pcie switch with downstream
- * bridges leading to the NHI and to the tunnel pci bridges.
+ * The Thunderbolt controller consists of a PCIe switch with downstream
+ * bridges leading to the NHI and to the tunnel PCI bridges.
  *
  * This quirk cuts power to the whole chip. Therefore we have to apply it
  * during suspend_noirq of the upstream bridge.
@@ -3316,17 +3445,19 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
 	bridge = ACPI_HANDLE(&dev->dev);
 	if (!bridge)
 		return;
+
 	/*
 	 * SXIO and SXLV are present only on machines requiring this quirk.
-	 * TB bridges in external devices might have the same device id as those
-	 * on the host, but they will not have the associated ACPI methods. This
-	 * implicitly checks that we are at the right bridge.
+	 * Thunderbolt bridges in external devices might have the same
+	 * device ID as those on the host, but they will not have the
+	 * associated ACPI methods. This implicitly checks that we are at
+	 * the right bridge.
 	 */
 	if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
 	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
 	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
 		return;
-	pci_info(dev, "quirk: cutting power to thunderbolt controller...\n");
+	pci_info(dev, "quirk: cutting power to Thunderbolt controller...\n");
 
 	/* magic sequence */
 	acpi_execute_simple_method(SXIO, NULL, 1);
@@ -3341,9 +3472,9 @@ DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
 			       quirk_apple_poweroff_thunderbolt);
 
 /*
- * Apple: Wait for the thunderbolt controller to reestablish pci tunnels.
+ * Apple: Wait for the Thunderbolt controller to reestablish PCI tunnels
  *
- * During suspend the thunderbolt controller is reset and all pci
+ * During suspend the Thunderbolt controller is reset and all PCI
  * tunnels are lost. The NHI driver will try to reestablish all tunnels
  * during resume. We have to manually wait for the NHI since there is
  * no parent child relationship between the NHI and the tunneled
@@ -3358,9 +3489,10 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
 		return;
 	if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
 		return;
+
 	/*
-	 * Find the NHI and confirm that we are a bridge on the tb host
-	 * controller and not on a tb endpoint.
+	 * Find the NHI and confirm that we are a bridge on the Thunderbolt
+	 * host controller and not on a Thunderbolt endpoint.
 	 */
 	sibling = pci_get_slot(dev->bus, 0x0);
 	if (sibling == dev)
@@ -3377,7 +3509,7 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
 			nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
 		    || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
 		goto out;
-	pci_info(dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
+	pci_info(dev, "quirk: waiting for Thunderbolt to reestablish PCI tunnels...\n");
 	device_pm_wait_for_dev(&dev->dev, &nhi->dev);
 out:
 	pci_dev_put(nhi);
@@ -3397,142 +3529,6 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
 			       quirk_apple_wait_for_thunderbolt);
 #endif
 
-static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
-			  struct pci_fixup *end)
-{
-	ktime_t calltime;
-
-	for (; f < end; f++)
-		if ((f->class == (u32) (dev->class >> f->class_shift) ||
-		     f->class == (u32) PCI_ANY_ID) &&
-		    (f->vendor == dev->vendor ||
-		     f->vendor == (u16) PCI_ANY_ID) &&
-		    (f->device == dev->device ||
-		     f->device == (u16) PCI_ANY_ID)) {
-			calltime = fixup_debug_start(dev, f->hook);
-			f->hook(dev);
-			fixup_debug_report(dev, calltime, f->hook);
-		}
-}
-
-extern struct pci_fixup __start_pci_fixups_early[];
-extern struct pci_fixup __end_pci_fixups_early[];
-extern struct pci_fixup __start_pci_fixups_header[];
-extern struct pci_fixup __end_pci_fixups_header[];
-extern struct pci_fixup __start_pci_fixups_final[];
-extern struct pci_fixup __end_pci_fixups_final[];
-extern struct pci_fixup __start_pci_fixups_enable[];
-extern struct pci_fixup __end_pci_fixups_enable[];
-extern struct pci_fixup __start_pci_fixups_resume[];
-extern struct pci_fixup __end_pci_fixups_resume[];
-extern struct pci_fixup __start_pci_fixups_resume_early[];
-extern struct pci_fixup __end_pci_fixups_resume_early[];
-extern struct pci_fixup __start_pci_fixups_suspend[];
-extern struct pci_fixup __end_pci_fixups_suspend[];
-extern struct pci_fixup __start_pci_fixups_suspend_late[];
-extern struct pci_fixup __end_pci_fixups_suspend_late[];
-
-static bool pci_apply_fixup_final_quirks;
-
-void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
-{
-	struct pci_fixup *start, *end;
-
-	switch (pass) {
-	case pci_fixup_early:
-		start = __start_pci_fixups_early;
-		end = __end_pci_fixups_early;
-		break;
-
-	case pci_fixup_header:
-		start = __start_pci_fixups_header;
-		end = __end_pci_fixups_header;
-		break;
-
-	case pci_fixup_final:
-		if (!pci_apply_fixup_final_quirks)
-			return;
-		start = __start_pci_fixups_final;
-		end = __end_pci_fixups_final;
-		break;
-
-	case pci_fixup_enable:
-		start = __start_pci_fixups_enable;
-		end = __end_pci_fixups_enable;
-		break;
-
-	case pci_fixup_resume:
-		start = __start_pci_fixups_resume;
-		end = __end_pci_fixups_resume;
-		break;
-
-	case pci_fixup_resume_early:
-		start = __start_pci_fixups_resume_early;
-		end = __end_pci_fixups_resume_early;
-		break;
-
-	case pci_fixup_suspend:
-		start = __start_pci_fixups_suspend;
-		end = __end_pci_fixups_suspend;
-		break;
-
-	case pci_fixup_suspend_late:
-		start = __start_pci_fixups_suspend_late;
-		end = __end_pci_fixups_suspend_late;
-		break;
-
-	default:
-		/* stupid compiler warning, you would think with an enum... */
-		return;
-	}
-	pci_do_fixups(dev, start, end);
-}
-EXPORT_SYMBOL(pci_fixup_device);
-
-
-static int __init pci_apply_final_quirks(void)
-{
-	struct pci_dev *dev = NULL;
-	u8 cls = 0;
-	u8 tmp;
-
-	if (pci_cache_line_size)
-		printk(KERN_DEBUG "PCI: CLS %u bytes\n",
-		       pci_cache_line_size << 2);
-
-	pci_apply_fixup_final_quirks = true;
-	for_each_pci_dev(dev) {
-		pci_fixup_device(pci_fixup_final, dev);
-		/*
-		 * If arch hasn't set it explicitly yet, use the CLS
-		 * value shared by all PCI devices.  If there's a
-		 * mismatch, fall back to the default value.
-		 */
-		if (!pci_cache_line_size) {
-			pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
-			if (!cls)
-				cls = tmp;
-			if (!tmp || cls == tmp)
-				continue;
-
-			printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
-			       cls << 2, tmp << 2,
-			       pci_dfl_cache_line_size << 2);
-			pci_cache_line_size = pci_dfl_cache_line_size;
-		}
-	}
-
-	if (!pci_cache_line_size) {
-		printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
-		       cls << 2, pci_dfl_cache_line_size << 2);
-		pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
-	}
-
-	return 0;
-}
-
-fs_initcall_sync(pci_apply_final_quirks);
-
 /*
  * Following are device-specific reset methods which can be used to
  * reset a single function if other methods (e.g. FLR, PM D0->D3) are
@@ -3602,9 +3598,7 @@ reset_complete:
 	return 0;
 }
 
-/*
- * Device-specific reset method for Chelsio T4-based adapters.
- */
+/* Device-specific reset method for Chelsio T4-based adapters */
 static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
 {
 	u16 old_command;
@@ -3887,7 +3881,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
 /*
  * Some devices have problems with Transaction Layer Packets with the Relaxed
  * Ordering Attribute set.  Such devices should mark themselves and other
- * Device Drivers should check before sending TLPs with RO set.
+ * device drivers should check before sending TLPs with RO set.
  */
 static void quirk_relaxedordering_disable(struct pci_dev *dev)
 {
@@ -3897,7 +3891,7 @@ static void quirk_relaxedordering_disable(struct pci_dev *dev)
 
 /*
  * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root
- * Complex has a Flow Control Credit issue which can cause performance
+ * Complex have a Flow Control Credit issue which can cause performance
  * problems with Upstream Transaction Layer Packets with Relaxed Ordering set.
  */
 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
@@ -3958,7 +3952,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED
 			      quirk_relaxedordering_disable);
 
 /*
- * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex
+ * The AMD ARM A1100 (aka "SEATTLE") SoC has a bug in its PCIe Root Complex
  * where Upstream Transaction Layer Packets with the Relaxed Ordering
  * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering
  * set.  This is a violation of the PCIe 3.0 Transaction Ordering Rules
@@ -4022,7 +4016,7 @@ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
 	 * This mask/compare operation selects for Physical Function 4 on a
 	 * T5.  We only need to fix up the Root Port once for any of the
 	 * PFs.  PF[0..3] have PCI Device IDs of 0x50xx, but PF4 is uniquely
-	 * 0x54xx so we use that one,
+	 * 0x54xx so we use that one.
 	 */
 	if ((pdev->device & 0xff00) == 0x5400)
 		quirk_disable_root_port_attributes(pdev);
@@ -4113,7 +4107,7 @@ static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
 static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
 {
 	/*
-	 * X-Gene root matching this quirk do not allow peer-to-peer
+	 * X-Gene Root Ports matching this quirk do not allow peer-to-peer
 	 * transactions with others, allowing masking out these bits as if they
 	 * were unimplemented in the ACS capability.
 	 */
@@ -4230,11 +4224,29 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
  * 0xa290-0xa29f PCI Express Root port #{0-16}
  * 0xa2e7-0xa2ee PCI Express Root port #{17-24}
  *
+ * Mobile chipsets are also affected; the 7th & 8th Generation Specification
+ * Update confirms ACS erratum 22, status no fix: (7th Generation Intel
+ * Processor Family I/O for U/Y Platforms and 8th Generation Intel Processor
+ * Family I/O for U Quad Core Platforms Specification Update, August 2017,
+ * Revision 002, Document#: 334660-002)[6]
+ * Device IDs from I/O datasheet: (7th Generation Intel Processor Family I/O
+ * for U/Y Platforms and 8th Generation Intel Processor Family I/O for U
+ * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7]
+ *
+ * 0x9d10-0x9d1b PCI Express Root port #{1-12}
+ *
+ * The 300 series chipset suffers from the same bug so include those root
+ * ports here as well.
+ *
+ * 0xa32c-0xa343 PCI Express Root port #{0-24}
+ *
  * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
  * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
  * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
  * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
  * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
+ * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html
+ * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html
  */
 static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
 {
@@ -4244,6 +4256,8 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
 	switch (dev->device) {
 	case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
 	case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
+	case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
+	case 0xa32c ... 0xa343:				/* 300 series */
 		return true;
 	}
 
@@ -4361,8 +4375,8 @@ static const struct pci_dev_acs_enabled {
 	{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
 	{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
 	/* QCOM QDF2xxx root ports */
-	{ 0x17cb, 0x400, pci_quirk_qcom_rp_acs },
-	{ 0x17cb, 0x401, pci_quirk_qcom_rp_acs },
+	{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
+	{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
 	/* Intel PCH root ports */
 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
@@ -4436,7 +4450,7 @@ static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
 	/*
 	 * Read the RCBA register from the LPC (D31:F0).  PCH root ports
 	 * are D28:F* and therefore get probed before LPC, thus we can't
-	 * use pci_get_slot/pci_read_config_dword here.
+	 * use pci_get_slot()/pci_read_config_dword() here.
 	 */
 	pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
 				  INTEL_LPC_RCBA_REG, &rcba);
@@ -4569,7 +4583,7 @@ int pci_dev_specific_enable_acs(struct pci_dev *dev)
 }
 
 /*
- * The PCI capabilities list for Intel DH895xCC VFs (device id 0x0443) with
+ * The PCI capabilities list for Intel DH895xCC VFs (device ID 0x0443) with
  * QuickAssist Technology (QAT) is prematurely terminated in hardware.  The
  * Next Capability pointer in the MSI Capability Structure should point to
  * the PCIe Capability Structure but is incorrectly hardwired as 0 terminating
@@ -4630,9 +4644,7 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
 		if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
 			return;
 
-		/*
-		 * Save PCIE cap
-		 */
+		/* Save PCIe cap */
 		state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
 		if (!state)
 			return;
@@ -4653,7 +4665,7 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
 
-/* FLR may cause some 82579 devices to hang. */
+/* FLR may cause some 82579 devices to hang */
 static void quirk_intel_no_flr(struct pci_dev *dev)
 {
 	dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 072784f55ea5..79b1824e83b4 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1943,56 +1943,56 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
 	}
 
 	/*
+	 * There is only one bridge on the bus so it gets all available
+	 * resources which it can then distribute to the possible
+	 * hotplug bridges below.
+	 */
+	if (hotplug_bridges + normal_bridges == 1) {
+		dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
+		if (dev->subordinate) {
+			pci_bus_distribute_available_resources(dev->subordinate,
+				add_list, available_io, available_mmio,
+				available_mmio_pref);
+		}
+		return;
+	}
+
+	/*
 	 * Go over devices on this bus and distribute the remaining
 	 * resource space between hotplug bridges.
 	 */
 	for_each_pci_bridge(dev, bus) {
+		resource_size_t align, io, mmio, mmio_pref;
 		struct pci_bus *b;
 
 		b = dev->subordinate;
-		if (!b)
+		if (!b || !dev->is_hotplug_bridge)
 			continue;
 
-		if (!hotplug_bridges && normal_bridges == 1) {
-			/*
-			 * There is only one bridge on the bus (upstream
-			 * port) so it gets all available resources
-			 * which it can then distribute to the possible
-			 * hotplug bridges below.
-			 */
-			pci_bus_distribute_available_resources(b, add_list,
-				available_io, available_mmio,
-				available_mmio_pref);
-		} else if (dev->is_hotplug_bridge) {
-			resource_size_t align, io, mmio, mmio_pref;
-
-			/*
-			 * Distribute available extra resources equally
-			 * between hotplug-capable downstream ports
-			 * taking alignment into account.
-			 *
-			 * Here hotplug_bridges is always != 0.
-			 */
-			align = pci_resource_alignment(bridge, io_res);
-			io = div64_ul(available_io, hotplug_bridges);
-			io = min(ALIGN(io, align), remaining_io);
-			remaining_io -= io;
-
-			align = pci_resource_alignment(bridge, mmio_res);
-			mmio = div64_ul(available_mmio, hotplug_bridges);
-			mmio = min(ALIGN(mmio, align), remaining_mmio);
-			remaining_mmio -= mmio;
-
-			align = pci_resource_alignment(bridge, mmio_pref_res);
-			mmio_pref = div64_ul(available_mmio_pref,
-					     hotplug_bridges);
-			mmio_pref = min(ALIGN(mmio_pref, align),
-					remaining_mmio_pref);
-			remaining_mmio_pref -= mmio_pref;
-
-			pci_bus_distribute_available_resources(b, add_list, io,
-							       mmio, mmio_pref);
-		}
+		/*
+		 * Distribute available extra resources equally between
+		 * hotplug-capable downstream ports taking alignment into
+		 * account.
+		 *
+		 * Here hotplug_bridges is always != 0.
+		 */
+		align = pci_resource_alignment(bridge, io_res);
+		io = div64_ul(available_io, hotplug_bridges);
+		io = min(ALIGN(io, align), remaining_io);
+		remaining_io -= io;
+
+		align = pci_resource_alignment(bridge, mmio_res);
+		mmio = div64_ul(available_mmio, hotplug_bridges);
+		mmio = min(ALIGN(mmio, align), remaining_mmio);
+		remaining_mmio -= mmio;
+
+		align = pci_resource_alignment(bridge, mmio_pref_res);
+		mmio_pref = div64_ul(available_mmio_pref, hotplug_bridges);
+		mmio_pref = min(ALIGN(mmio_pref, align), remaining_mmio_pref);
+		remaining_mmio_pref -= mmio_pref;
+
+		pci_bus_distribute_available_resources(b, add_list, io, mmio,
+						       mmio_pref);
 	}
 }
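To make the per-bridge arithmetic above concrete, here is a standalone userspace sketch (made-up numbers, simplified ALIGN/MIN helpers) showing why each share is clamped to what is still remaining: rounding the share up to the window alignment could otherwise hand out more space than is actually available.

#include <stdio.h>
#include <stdint.h>

#define EX_ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))	/* round up to a multiple of a */
#define EX_MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	uint64_t available = 100, remaining = 100, align = 8;	/* MB */
	int hotplug_bridges = 3;

	for (int i = 0; i < hotplug_bridges; i++) {
		uint64_t share = EX_MIN(EX_ALIGN(available / hotplug_bridges, align),
					remaining);

		remaining -= share;
		printf("bridge %d gets %llu MB, %llu MB left\n",
		       i, (unsigned long long)share, (unsigned long long)remaining);
	}
	return 0;
}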