author     Linus Torvalds <torvalds@linux-foundation.org>  2017-07-08 15:51:57 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-07-08 15:51:57 -0700
commit     f263fbb8d60824993c1b64385056a3cfdbb21d45 (patch)
tree       9ce14a362e1cb0aef2b4a79d6fb84d883118cce3 /drivers/pci
parent     026d15f6b9878794fae1f794cae881ccd65052e5 (diff)
parent     6aed468480e8b03ece5a395fe8013e66348a2547 (diff)
download   linux-f263fbb8d60824993c1b64385056a3cfdbb21d45.tar.gz
Merge tag 'pci-v4.13-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
Pull PCI updates from Bjorn Helgaas:

  - add sysfs max_link_speed/width, current_link_speed/width (Wong Vee
    Khee)

  - make host bridge IRQ mapping much more generic (Matthew Minter,
    Lorenzo Pieralisi)

  - convert most drivers to pci_scan_root_bus_bridge() (Lorenzo
    Pieralisi)

  - mutex sriov_configure() (Jakub Kicinski)

  - mutex pci_error_handlers callbacks (Christoph Hellwig)

  - split ->reset_notify() into ->reset_prepare()/reset_done()
    (Christoph Hellwig)

  - support multiple PCIe portdrv interrupts for MSI as well as MSI-X
    (Gabriele Paoloni)

  - allocate MSI/MSI-X vector for Downstream Port Containment (Gabriele
    Paoloni)

  - fix MSI IRQ affinity pre/post/min_vecs issue (Michael Hernandez)

  - test INTx masking during enumeration, not at run-time (Piotr Gregor)

  - avoid using device_may_wakeup() for runtime PM (Rafael J. Wysocki)

  - restore the status of PCI devices across hibernation (Chen Yu)

  - keep parent resources that start at 0x0 (Ard Biesheuvel)

  - enable ECRC only if device supports it (Bjorn Helgaas)

  - restore PRI and PASID state after Function-Level Reset (CQ Tang)

  - skip DPC event if device is not present (Keith Busch)

  - check domain when matching SMBIOS info (Sujith Pandel)

  - mark Intel XXV710 NIC INTx masking as broken (Alex Williamson)

  - avoid AMD SB7xx EHCI USB wakeup defect (Kai-Heng Feng)

  - work around long-standing Macbook Pro poweroff issue (Bjorn Helgaas)

  - add Switchtec "running" status flag (Logan Gunthorpe)

  - fix dra7xx incorrect RW1C IRQ register usage (Arvind Yadav)

  - modify xilinx-nwl IRQ chip for legacy interrupts (Bharat Kumar
    Gogada)

  - move VMD SRCU cleanup after bus, child device removal (Jon Derrick)

  - add Faraday clock handling (Linus Walleij)

  - configure Rockchip MPS and reorganize (Shawn Lin)

  - limit Qualcomm TLP size to 2K (hardware issue) (Srinivas Kandagatla)

  - support Tegra MSI 64-bit addressing (Thierry Reding)

  - use Rockchip normal (not privileged) register bank (Shawn Lin)

  - add HiSilicon Kirin SoC PCIe controller driver (Xiaowei Song)

  - add Sigma Designs Tango SMP8759 PCIe controller driver (Marc
    Gonzalez)

  - add MediaTek PCIe host controller support (Ryder Lee)

  - add Qualcomm IPQ4019 support (John Crispin)

  - add HyperV vPCI protocol v1.2 support (Jork Loeser)

  - add i.MX6 regulator support (Quentin Schulz)

* tag 'pci-v4.13-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (113 commits)
  PCI: tango: Add Sigma Designs Tango SMP8759 PCIe host bridge support
  PCI: Add DT binding for Sigma Designs Tango PCIe controller
  PCI: rockchip: Use normal register bank for config accessors
  dt-bindings: PCI: Add documentation for MediaTek PCIe
  PCI: Remove __pci_dev_reset() and pci_dev_reset()
  PCI: Split ->reset_notify() method into ->reset_prepare() and ->reset_done()
  PCI: xilinx: Make of_device_ids const
  PCI: xilinx-nwl: Modify IRQ chip for legacy interrupts
  PCI: vmd: Move SRCU cleanup after bus, child device removal
  PCI: vmd: Correct comment: VMD domains start at 0x10000, not 0x1000
  PCI: versatile: Add local struct device pointers
  PCI: tegra: Do not allocate MSI target memory
  PCI: tegra: Support MSI 64-bit addressing
  PCI: rockchip: Use local struct device pointer consistently
  PCI: rockchip: Check for clk_prepare_enable() errors during resume
  MAINTAINERS: Remove Wenrui Li as Rockchip PCIe driver maintainer
  PCI: rockchip: Configure RC's MPS setting
  PCI: rockchip: Reconfigure configuration space header type
  PCI: rockchip: Split out rockchip_pcie_cfg_configuration_accesses()
  PCI: rockchip: Move configuration accesses into rockchip_pcie_cfg_atu()
  ...
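
For the ->reset_notify() split called out in the summary above, here is a minimal driver-side sketch of the new hooks. The foo_* names are hypothetical; only the two callback signatures and the .reset_prepare/.reset_done fields of struct pci_error_handlers reflect the new interface.

/*
 * Sketch (not taken from this series): a driver implementing the hooks that
 * replace ->reset_notify(dev, prepare).  The core calls reset_prepare()
 * before a function-level reset and reset_done() once it has completed.
 */
#include <linux/pci.h>

static void foo_reset_prepare(struct pci_dev *pdev)
{
	/* Quiesce the device: stop DMA and mask interrupts before the reset. */
	dev_info(&pdev->dev, "preparing for reset\n");
}

static void foo_reset_done(struct pci_dev *pdev)
{
	/* Reset has completed: reprogram device state. */
	dev_info(&pdev->dev, "reset done, restoring state\n");
}

static const struct pci_error_handlers foo_err_handler = {
	.reset_prepare	= foo_reset_prepare,
	.reset_done	= foo_reset_done,
};

A driver points .err_handler in its struct pci_driver at foo_err_handler; code that previously switched on the 'prepare' argument of reset_notify() now splits that logic across the two callbacks.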
Diffstat (limited to 'drivers/pci')
 drivers/pci/Makefile                   |  17
 drivers/pci/ats.c                      |  87
 drivers/pci/dwc/Kconfig                |  11
 drivers/pci/dwc/Makefile               |   1
 drivers/pci/dwc/pci-dra7xx.c           |   6
 drivers/pci/dwc/pci-exynos.c           |   2
 drivers/pci/dwc/pci-imx6.c             |  39
 drivers/pci/dwc/pci-keystone.c         |   2
 drivers/pci/dwc/pci-layerscape.c       |   6
 drivers/pci/dwc/pcie-armada8k.c        |   2
 drivers/pci/dwc/pcie-artpec6.c         |   2
 drivers/pci/dwc/pcie-designware-host.c |  43
 drivers/pci/dwc/pcie-designware-plat.c |   5
 drivers/pci/dwc/pcie-designware.h      |   2
 drivers/pci/dwc/pcie-kirin.c           | 517
 drivers/pci/dwc/pcie-qcom.c            | 440
 drivers/pci/dwc/pcie-spear13xx.c       |   2
 drivers/pci/host/Kconfig               |  25
 drivers/pci/host/Makefile              |   2
 drivers/pci/host/pci-aardvark.c        |  21
 drivers/pci/host/pci-ftpci100.c        | 143
 drivers/pci/host/pci-host-common.c     |  27
 drivers/pci/host/pci-hyperv.c          | 445
 drivers/pci/host/pci-rcar-gen2.c       |   2
 drivers/pci/host/pci-tegra.c           |  42
 drivers/pci/host/pci-versatile.c       |  36
 drivers/pci/host/pci-xgene.c           |  23
 drivers/pci/host/pcie-altera.c         |  24
 drivers/pci/host/pcie-iproc-bcma.c     |   7
 drivers/pci/host/pcie-iproc-platform.c |   7
 drivers/pci/host/pcie-iproc.c          | 135
 drivers/pci/host/pcie-mediatek.c       | 554
 drivers/pci/host/pcie-rcar.c           |  40
 drivers/pci/host/pcie-rockchip.c       | 147
 drivers/pci/host/pcie-tango.c          | 141
 drivers/pci/host/pcie-xilinx-nwl.c     |  79
 drivers/pci/host/pcie-xilinx.c         |  36
 drivers/pci/host/vmd.c                 |  10
 drivers/pci/iov.c                      |   4
 drivers/pci/msi.c                      |  14
 drivers/pci/pci-driver.c               |   3
 drivers/pci/pci-label.c                |   7
 drivers/pci/pci-sysfs.c                | 204
 drivers/pci/pci.c                      | 227
 drivers/pci/pci.h                      |   1
 drivers/pci/pcie/pcie-dpc.c            |   4
 drivers/pci/pcie/portdrv.h             |   7
 drivers/pci/pcie/portdrv_core.c        | 104
 drivers/pci/probe.c                    | 140
 drivers/pci/quirks.c                   |  19
 drivers/pci/setup-irq.c                |  45
 drivers/pci/switch/switchtec.c         |  40
52 files changed, 3297 insertions(+), 652 deletions(-)
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 462c1f5f5546..66a21acad952 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -4,7 +4,8 @@
 
 obj-y		+= access.o bus.o probe.o host-bridge.o remove.o pci.o \
 			pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
-			irq.o vpd.o setup-bus.o vc.o mmap.o
+			irq.o vpd.o setup-bus.o vc.o mmap.o setup-irq.o
+
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSFS) += slot.o
 
@@ -29,20 +30,6 @@ obj-$(CONFIG_PCI_ATS) += ats.o
 obj-$(CONFIG_PCI_IOV) += iov.o
 
 #
-# Some architectures use the generic PCI setup functions
-#
-obj-$(CONFIG_ALPHA) += setup-irq.o
-obj-$(CONFIG_ARC) += setup-irq.o
-obj-$(CONFIG_ARM) += setup-irq.o
-obj-$(CONFIG_ARM64) += setup-irq.o
-obj-$(CONFIG_UNICORE32) += setup-irq.o
-obj-$(CONFIG_SUPERH) += setup-irq.o
-obj-$(CONFIG_MIPS) += setup-irq.o
-obj-$(CONFIG_TILE) += setup-irq.o
-obj-$(CONFIG_SPARC_LEON) += setup-irq.o
-obj-$(CONFIG_M68K) += setup-irq.o
-
-#
 # ACPI Related PCI FW Functions
 # ACPI _DSM provided firmware instance and string name
 #
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index eeb9fb2b47aa..ad8ddbbbf245 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -153,23 +153,27 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
 	u32 max_requests;
 	int pos;
 
+	if (WARN_ON(pdev->pri_enabled))
+		return -EBUSY;
+
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
 	if (!pos)
 		return -EINVAL;
 
-	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
 	pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
-	if ((control & PCI_PRI_CTRL_ENABLE) ||
-	    !(status & PCI_PRI_STATUS_STOPPED))
+	if (!(status & PCI_PRI_STATUS_STOPPED))
 		return -EBUSY;
 
 	pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
 	reqs = min(max_requests, reqs);
+	pdev->pri_reqs_alloc = reqs;
 	pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
 
-	control |= PCI_PRI_CTRL_ENABLE;
+	control = PCI_PRI_CTRL_ENABLE;
 	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
 
+	pdev->pri_enabled = 1;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(pci_enable_pri);
@@ -185,6 +189,9 @@ void pci_disable_pri(struct pci_dev *pdev)
 	u16 control;
 	int pos;
 
+	if (WARN_ON(!pdev->pri_enabled))
+		return;
+
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
 	if (!pos)
 		return;
@@ -192,10 +199,34 @@ void pci_disable_pri(struct pci_dev *pdev)
 	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
 	control &= ~PCI_PRI_CTRL_ENABLE;
 	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+
+	pdev->pri_enabled = 0;
 }
 EXPORT_SYMBOL_GPL(pci_disable_pri);
 
 /**
+ * pci_restore_pri_state - Restore PRI
+ * @pdev: PCI device structure
+ */
+void pci_restore_pri_state(struct pci_dev *pdev)
+{
+	u16 control = PCI_PRI_CTRL_ENABLE;
+	u32 reqs = pdev->pri_reqs_alloc;
+	int pos;
+
+	if (!pdev->pri_enabled)
+		return;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+	if (!pos)
+		return;
+
+	pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
+	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+}
+EXPORT_SYMBOL_GPL(pci_restore_pri_state);
+
+/**
  * pci_reset_pri - Resets device's PRI state
  * @pdev: PCI device structure
  *
@@ -207,16 +238,14 @@ int pci_reset_pri(struct pci_dev *pdev)
 	u16 control;
 	int pos;
 
+	if (WARN_ON(pdev->pri_enabled))
+		return -EBUSY;
+
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
 	if (!pos)
 		return -EINVAL;
 
-	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
-	if (control & PCI_PRI_CTRL_ENABLE)
-		return -EBUSY;
-
-	control |= PCI_PRI_CTRL_RESET;
-
+	control = PCI_PRI_CTRL_RESET;
 	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
 
 	return 0;
@@ -239,16 +268,14 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
 	u16 control, supported;
 	int pos;
 
+	if (WARN_ON(pdev->pasid_enabled))
+		return -EBUSY;
+
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
 	if (!pos)
 		return -EINVAL;
 
-	pci_read_config_word(pdev, pos + PCI_PASID_CTRL, &control);
 	pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
-
-	if (control & PCI_PASID_CTRL_ENABLE)
-		return -EINVAL;
-
 	supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
 
 	/* User wants to enable anything unsupported? */
@@ -256,9 +283,12 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
 		return -EINVAL;
 
 	control = PCI_PASID_CTRL_ENABLE | features;
+	pdev->pasid_features = features;
 
 	pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
 
+	pdev->pasid_enabled = 1;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(pci_enable_pasid);
@@ -266,22 +296,47 @@ EXPORT_SYMBOL_GPL(pci_enable_pasid);
 /**
  * pci_disable_pasid - Disable the PASID capability
  * @pdev: PCI device structure
- *
  */
 void pci_disable_pasid(struct pci_dev *pdev)
 {
 	u16 control = 0;
 	int pos;
 
+	if (WARN_ON(!pdev->pasid_enabled))
+		return;
+
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
 	if (!pos)
 		return;
 
 	pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+
+	pdev->pasid_enabled = 0;
 }
 EXPORT_SYMBOL_GPL(pci_disable_pasid);
 
 /**
+ * pci_restore_pasid_state - Restore PASID capabilities
+ * @pdev: PCI device structure
+ */
+void pci_restore_pasid_state(struct pci_dev *pdev)
+{
+	u16 control;
+	int pos;
+
+	if (!pdev->pasid_enabled)
+		return;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+	if (!pos)
+		return;
+
+	control = PCI_PASID_CTRL_ENABLE | pdev->pasid_features;
+	pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+}
+EXPORT_SYMBOL_GPL(pci_restore_pasid_state);
+
+/**
  * pci_pasid_features - Check which PASID features are supported
  * @pdev: PCI device structure
  *
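
As a usage illustration for the restore helpers added above, here is a hedged sketch of an FLR-recovery/resume path. The wrapper function is hypothetical; pci_restore_pri_state() and pci_restore_pasid_state() are the helpers from this hunk, assumed to be declared in <linux/pci-ats.h> alongside the existing PRI/PASID API.

/*
 * Sketch only: re-enabling PRI/PASID after a Function-Level Reset.  The
 * example_* wrapper is illustrative; the two pci_restore_*_state() calls
 * are the new helpers above.
 */
#include <linux/pci.h>
#include <linux/pci-ats.h>

static void example_restore_ats_state(struct pci_dev *pdev)
{
	/*
	 * Both helpers return early unless the capability was enabled before
	 * the reset (pdev->pasid_enabled / pdev->pri_enabled), so they are
	 * safe to call unconditionally from a restore path.
	 */
	pci_restore_pasid_state(pdev);	/* re-enable PASID with the saved features */
	pci_restore_pri_state(pdev);	/* re-program the PRI allocation and enable it */
}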
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index b7e15526d676..d275aadc47ee 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -16,6 +16,7 @@ config PCIE_DW_EP
 
 config PCI_DRA7XX
 	bool "TI DRA7xx PCIe controller"
+	depends on SOC_DRA7XX || COMPILE_TEST
 	depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT
 	depends on OF && HAS_IOMEM && TI_PIPE3
 	help
@@ -158,4 +159,14 @@ config PCIE_ARTPEC6
 	  Say Y here to enable PCIe controller support on Axis ARTPEC-6
 	  SoCs.  This PCIe controller uses the DesignWare core.
 
+config PCIE_KIRIN
+	depends on OF && ARM64
+	bool "HiSilicon Kirin series SoCs PCIe controllers"
+	depends on PCI
+	select PCIEPORTBUS
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe controller support
+	  on HiSilicon Kirin series SoCs.
+
 endmenu
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile
index f31a8596442a..c61be9738cce 100644
--- a/drivers/pci/dwc/Makefile
+++ b/drivers/pci/dwc/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
 obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
 obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
 obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
 
 # The following drivers are for devices that use the generic ACPI
 # pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index 8decf46cf525..f2fc5f47064e 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -174,7 +174,7 @@ static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
 static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
 {
 	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
-			   ~LEG_EP_INTERRUPTS & ~MSI);
+			   LEG_EP_INTERRUPTS | MSI);
 
 	dra7xx_pcie_writel(dra7xx,
 			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
@@ -184,7 +184,7 @@ static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
 static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
 {
 	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
-			   ~INTERRUPTS);
+			   INTERRUPTS);
 	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
 			   INTERRUPTS);
 }
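
The two hunks above are the dra7xx RW1C fix from the merge summary: these IRQSTATUS registers clear a bit when 1 is written to it, so the value written must be the bits to acknowledge, not their complement. A generic, hypothetical sketch of the semantics (the offset and bit names are made up):

/*
 * Sketch: acknowledging interrupts in a "write 1 to clear" status register.
 * Writing the complement (as the old ~LEG_EP_INTERRUPTS & ~MSI value
 * effectively did) would clear every *other* pending bit instead.
 */
#include <linux/bitops.h>
#include <linux/io.h>

#define EXAMPLE_IRQSTATUS	0x24			/* hypothetical offset */
#define EXAMPLE_IRQ_BITS	(BIT(0) | BIT(4))	/* bits to acknowledge */

static void example_ack_irqs(void __iomem *base)
{
	/* Correct RW1C usage: write exactly the bits to be cleared. */
	writel(EXAMPLE_IRQ_BITS, base + EXAMPLE_IRQSTATUS);
}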
@@ -208,7 +208,7 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp)
 	dra7xx_pcie_enable_interrupts(dra7xx);
 }
 
-static struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
+static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
 	.host_init = dra7xx_pcie_host_init,
 };
 
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index 546082ad5a3f..c78c06552590 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -590,7 +590,7 @@ static void exynos_pcie_host_init(struct pcie_port *pp)
 	exynos_pcie_enable_interrupts(ep);
 }
 
-static struct dw_pcie_host_ops exynos_pcie_host_ops = {
+static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
 	.rd_own_conf = exynos_pcie_rd_own_conf,
 	.wr_own_conf = exynos_pcie_wr_own_conf,
 	.host_init = exynos_pcie_host_init,
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index 19a289b8cc94..bf5c3616e344 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -24,6 +24,7 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
 #include <linux/resource.h>
 #include <linux/signal.h>
 #include <linux/types.h>
@@ -59,6 +60,7 @@ struct imx6_pcie {
 	u32			tx_swing_full;
 	u32			tx_swing_low;
 	int			link_gen;
+	struct regulator	*vpcie;
 };
 
 /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -284,6 +286,8 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
 
 static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
 {
+	struct device *dev = imx6_pcie->pci->dev;
+
 	switch (imx6_pcie->variant) {
 	case IMX7D:
 		reset_control_assert(imx6_pcie->pciephy_reset);
@@ -310,6 +314,14 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
 				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
 		break;
 	}
+
+	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
+		int ret = regulator_disable(imx6_pcie->vpcie);
+
+		if (ret)
+			dev_err(dev, "failed to disable vpcie regulator: %d\n",
+				ret);
+	}
 }
 
 static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
@@ -376,10 +388,19 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
 	struct device *dev = pci->dev;
 	int ret;
 
+	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
+		ret = regulator_enable(imx6_pcie->vpcie);
+		if (ret) {
+			dev_err(dev, "failed to enable vpcie regulator: %d\n",
+				ret);
+			return;
+		}
+	}
+
 	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
 	if (ret) {
 		dev_err(dev, "unable to enable pcie_phy clock\n");
-		return;
+		goto err_pcie_phy;
 	}
 
 	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
@@ -439,6 +460,13 @@ err_pcie:
 	clk_disable_unprepare(imx6_pcie->pcie_bus);
 err_pcie_bus:
 	clk_disable_unprepare(imx6_pcie->pcie_phy);
+err_pcie_phy:
+	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
+		ret = regulator_disable(imx6_pcie->vpcie);
+		if (ret)
+			dev_err(dev, "failed to disable vpcie regulator: %d\n",
+				ret);
+	}
 }
 
 static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
@@ -629,7 +657,7 @@ static int imx6_pcie_link_up(struct dw_pcie *pci)
 			PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
 }
 
-static struct dw_pcie_host_ops imx6_pcie_host_ops = {
+static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
 	.host_init = imx6_pcie_host_init,
 };
 
@@ -802,6 +830,13 @@ static int imx6_pcie_probe(struct platform_device *pdev)
 	if (ret)
 		imx6_pcie->link_gen = 1;
 
+	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+	if (IS_ERR(imx6_pcie->vpcie)) {
+		if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		imx6_pcie->vpcie = NULL;
+	}
+
 	platform_set_drvdata(pdev, imx6_pcie);
 
 	ret = imx6_add_pcie_port(imx6_pcie, pdev);
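
The vpcie handling above follows the usual optional-supply probe pattern; here is a hedged, stand-alone sketch of it (the wrapper is illustrative, only the regulator calls and the -EPROBE_DEFER handling mirror the hunk):

/*
 * Sketch: treating a supply as optional.  Only -EPROBE_DEFER is propagated;
 * any other error is taken as "no such supply" and the driver runs without it.
 */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int example_get_optional_supply(struct platform_device *pdev,
				       struct regulator **out)
{
	struct regulator *reg;

	reg = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* supply described but not ready yet */
		reg = NULL;			/* genuinely absent: carry on without it */
	}

	*out = reg;
	return 0;
}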
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index fcc9723bad6e..4783cec1f78d 100644
--- a/drivers/pci/dwc/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -291,7 +291,7 @@ static void __init ks_pcie_host_init(struct pcie_port *pp)
 			"Asynchronous external abort");
 }
 
-static struct dw_pcie_host_ops keystone_pcie_host_ops = {
+static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
 	.rd_other_conf = ks_dw_pcie_rd_other_conf,
 	.wr_other_conf = ks_dw_pcie_wr_other_conf,
 	.host_init = ks_pcie_host_init,
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index 27d638c4e134..fd861289ad8b 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -39,7 +39,7 @@ struct ls_pcie_drvdata {
 	u32 lut_offset;
 	u32 ltssm_shift;
 	u32 lut_dbg;
-	struct dw_pcie_host_ops *ops;
+	const struct dw_pcie_host_ops *ops;
 	const struct dw_pcie_ops *dw_pcie_ops;
 };
 
@@ -185,12 +185,12 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp,
 	return 0;
 }
 
-static struct dw_pcie_host_ops ls1021_pcie_host_ops = {
+static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
 	.host_init = ls1021_pcie_host_init,
 	.msi_host_init = ls_pcie_msi_host_init,
 };
 
-static struct dw_pcie_host_ops ls_pcie_host_ops = {
+static const struct dw_pcie_host_ops ls_pcie_host_ops = {
 	.host_init = ls_pcie_host_init,
 	.msi_host_init = ls_pcie_msi_host_init,
 };
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index 495b023042b3..ea8f34af6a85 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -160,7 +160,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-static struct dw_pcie_host_ops armada8k_pcie_host_ops = {
+static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
 	.host_init = armada8k_pcie_host_init,
 };
 
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index 82a04acc42fd..01c6f7823672 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -184,7 +184,7 @@ static void artpec6_pcie_host_init(struct pcie_port *pp)
 	artpec6_pcie_enable_interrupts(artpec6_pcie);
 }
 
-static struct dw_pcie_host_ops artpec6_pcie_host_ops = {
+static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
 	.host_init = artpec6_pcie_host_init,
 };
 
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 28ed32ba4f1b..d29c020da082 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -280,9 +280,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
 	struct device_node *np = dev->of_node;
 	struct platform_device *pdev = to_platform_device(dev);
 	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
 	struct resource *cfg_res;
 	int i, ret;
-	LIST_HEAD(res);
 	struct resource_entry *win, *tmp;
 
 	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
@@ -295,16 +295,21 @@ int dw_pcie_host_init(struct pcie_port *pp)
 		dev_err(dev, "missing *config* reg space\n");
 	}
 
-	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
+	bridge = pci_alloc_host_bridge(0);
+	if (!bridge)
+		return -ENOMEM;
+
+	ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
+					&bridge->windows, &pp->io_base);
 	if (ret)
 		return ret;
 
-	ret = devm_request_pci_bus_resources(dev, &res);
+	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
 	if (ret)
 		goto error;
 
 	/* Get the I/O and memory ranges from DT */
-	resource_list_for_each_entry_safe(win, tmp, &res) {
+	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
 		switch (resource_type(win->res)) {
 		case IORESOURCE_IO:
 			ret = pci_remap_iospace(win->res, pp->io_base);
@@ -400,27 +405,27 @@ int dw_pcie_host_init(struct pcie_port *pp)
 		pp->ops->host_init(pp);
 
 	pp->root_bus_nr = pp->busn->start;
+
+	bridge->dev.parent = dev;
+	bridge->sysdata = pp;
+	bridge->busnr = pp->root_bus_nr;
+	bridge->ops = &dw_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		bus = pci_scan_root_bus_msi(dev, pp->root_bus_nr,
-					    &dw_pcie_ops, pp, &res,
-					    &dw_pcie_msi_chip);
+		bridge->msi = &dw_pcie_msi_chip;
 		dw_pcie_msi_chip.dev = dev;
-	} else
-		bus = pci_scan_root_bus(dev, pp->root_bus_nr, &dw_pcie_ops,
-					pp, &res);
-	if (!bus) {
-		ret = -ENOMEM;
-		goto error;
 	}
 
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret)
+		goto error;
+
+	bus = bridge->bus;
+
 	if (pp->ops->scan_bus)
 		pp->ops->scan_bus(pp);
 
-#ifdef CONFIG_ARM
-	/* support old dtbs that incorrectly describe IRQs */
-	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
-#endif
-
 	pci_bus_size_bridges(bus);
 	pci_bus_assign_resources(bus);
 
@@ -431,7 +436,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
 	return 0;
 
 error:
-	pci_free_resource_list(&res);
+	pci_free_host_bridge(bridge);
 	return ret;
 }
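
The dw_pcie_host_init() rework above is one instance of the pci_scan_root_bus_bridge() conversion from the merge summary. A stripped-down sketch of the general flow follows; the foo_* names and the empty pci_ops are placeholders, while the bridge fields and pci_* calls are the ones used here and in the pci-aardvark conversion further down.

/*
 * Sketch of a host driver probe using the new bridge API.  Register mapping
 * and window setup are elided; config accessors would live in foo_pcie_ops.
 */
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

struct foo_pcie {
	void __iomem *base;
};

static struct pci_ops foo_pcie_ops;	/* config accessors omitted in this sketch */

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct pci_host_bridge *bridge;
	struct foo_pcie *pcie;
	int ret;

	/* Driver-private data now lives in the bridge allocation. */
	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;
	pcie = pci_host_bridge_priv(bridge);
	platform_set_drvdata(pdev, pcie);

	/* ... ioremap registers and add host resources to &bridge->windows ... */

	bridge->dev.parent = &pdev->dev;
	bridge->sysdata = pcie;
	bridge->busnr = 0;
	bridge->ops = &foo_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	/* Replaces pci_scan_root_bus(); the root bus comes back in bridge->bus. */
	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		return ret;

	pci_bus_assign_resources(bridge->bus);
	pci_bus_add_devices(bridge->bus);

	return 0;
}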
 
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index 32091b32f6e1..091b4e7ad059 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -46,7 +46,7 @@ static void dw_plat_pcie_host_init(struct pcie_port *pp)
 		dw_pcie_msi_init(pp);
 }
 
-static struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
+static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
 	.host_init = dw_plat_pcie_host_init,
 };
 
@@ -67,7 +67,8 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
 
 		ret = devm_request_irq(dev, pp->msi_irq,
 					dw_plat_pcie_msi_irq_handler,
-					IRQF_SHARED, "dw-plat-pcie-msi", pp);
+					IRQF_SHARED | IRQF_NO_THREAD,
+					"dw-plat-pcie-msi", pp);
 		if (ret) {
 			dev_err(dev, "failed to request MSI IRQ\n");
 			return ret;
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index c6a840575796..b4d2a89f8e58 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -162,7 +162,7 @@ struct pcie_port {
 	struct resource		*mem;
 	struct resource		*busn;
 	int			irq;
-	struct dw_pcie_host_ops	*ops;
+	const struct dw_pcie_host_ops *ops;
 	int			msi_irq;
 	struct irq_domain	*irq_domain;
 	unsigned long		msi_data;
diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
new file mode 100644
index 000000000000..33fddb9f6739
--- /dev/null
+++ b/drivers/pci/dwc/pcie-kirin.c
@@ -0,0 +1,517 @@
+/*
+ * PCIe host controller driver for Kirin Phone SoCs
+ *
+ * Copyright (C) 2017 Hilisicon Electronics Co., Ltd.
+ *		http://www.huawei.com
+ *
+ * Author: Xiaowei Song <songxiaowei@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/compiler.h>
+#include <linux/compiler.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include "pcie-designware.h"
+
+#define to_kirin_pcie(x) dev_get_drvdata((x)->dev)
+
+#define REF_CLK_FREQ			100000000
+
+/* PCIe ELBI registers */
+#define SOC_PCIECTRL_CTRL0_ADDR		0x000
+#define SOC_PCIECTRL_CTRL1_ADDR		0x004
+#define SOC_PCIEPHY_CTRL2_ADDR		0x008
+#define SOC_PCIEPHY_CTRL3_ADDR		0x00c
+#define PCIE_ELBI_SLV_DBI_ENABLE	(0x1 << 21)
+
+/* info located in APB */
+#define PCIE_APP_LTSSM_ENABLE	0x01c
+#define PCIE_APB_PHY_CTRL0	0x0
+#define PCIE_APB_PHY_CTRL1	0x4
+#define PCIE_APB_PHY_STATUS0	0x400
+#define PCIE_LINKUP_ENABLE	(0x8020)
+#define PCIE_LTSSM_ENABLE_BIT	(0x1 << 11)
+#define PIPE_CLK_STABLE		(0x1 << 19)
+#define PHY_REF_PAD_BIT		(0x1 << 8)
+#define PHY_PWR_DOWN_BIT	(0x1 << 22)
+#define PHY_RST_ACK_BIT		(0x1 << 16)
+
+/* info located in sysctrl */
+#define SCTRL_PCIE_CMOS_OFFSET	0x60
+#define SCTRL_PCIE_CMOS_BIT	0x10
+#define SCTRL_PCIE_ISO_OFFSET	0x44
+#define SCTRL_PCIE_ISO_BIT	0x30
+#define SCTRL_PCIE_HPCLK_OFFSET	0x190
+#define SCTRL_PCIE_HPCLK_BIT	0x184000
+#define SCTRL_PCIE_OE_OFFSET	0x14a
+#define PCIE_DEBOUNCE_PARAM	0xF0F400
+#define PCIE_OE_BYPASS		(0x3 << 28)
+
+/* peri_crg ctrl */
+#define CRGCTRL_PCIE_ASSERT_OFFSET	0x88
+#define CRGCTRL_PCIE_ASSERT_BIT		0x8c000000
+
+/* Time for delay */
+#define REF_2_PERST_MIN		20000
+#define REF_2_PERST_MAX		25000
+#define PERST_2_ACCESS_MIN	10000
+#define PERST_2_ACCESS_MAX	12000
+#define LINK_WAIT_MIN		900
+#define LINK_WAIT_MAX		1000
+#define PIPE_CLK_WAIT_MIN	550
+#define PIPE_CLK_WAIT_MAX	600
+#define TIME_CMOS_MIN		100
+#define TIME_CMOS_MAX		105
+#define TIME_PHY_PD_MIN		10
+#define TIME_PHY_PD_MAX		11
+
+struct kirin_pcie {
+	struct dw_pcie	*pci;
+	void __iomem	*apb_base;
+	void __iomem	*phy_base;
+	struct regmap	*crgctrl;
+	struct regmap	*sysctrl;
+	struct clk	*apb_sys_clk;
+	struct clk	*apb_phy_clk;
+	struct clk	*phy_ref_clk;
+	struct clk	*pcie_aclk;
+	struct clk	*pcie_aux_clk;
+	int		gpio_id_reset;
+};
+
+/* Registers in PCIeCTRL */
+static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie,
+					 u32 val, u32 reg)
+{
+	writel(val, kirin_pcie->apb_base + reg);
+}
+
+static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg)
+{
+	return readl(kirin_pcie->apb_base + reg);
+}
+
+/* Registers in PCIePHY */
+static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie,
+					u32 val, u32 reg)
+{
+	writel(val, kirin_pcie->phy_base + reg);
+}
+
+static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg)
+{
+	return readl(kirin_pcie->phy_base + reg);
+}
+
+static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie,
+			       struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
+	if (IS_ERR(kirin_pcie->phy_ref_clk))
+		return PTR_ERR(kirin_pcie->phy_ref_clk);
+
+	kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux");
+	if (IS_ERR(kirin_pcie->pcie_aux_clk))
+		return PTR_ERR(kirin_pcie->pcie_aux_clk);
+
+	kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
+	if (IS_ERR(kirin_pcie->apb_phy_clk))
+		return PTR_ERR(kirin_pcie->apb_phy_clk);
+
+	kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
+	if (IS_ERR(kirin_pcie->apb_sys_clk))
+		return PTR_ERR(kirin_pcie->apb_sys_clk);
+
+	kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk");
+	if (IS_ERR(kirin_pcie->pcie_aclk))
+		return PTR_ERR(kirin_pcie->pcie_aclk);
+
+	return 0;
+}
+
+static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
+				    struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *apb;
+	struct resource *phy;
+	struct resource *dbi;
+
+	apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
+	kirin_pcie->apb_base = devm_ioremap_resource(dev, apb);
+	if (IS_ERR(kirin_pcie->apb_base))
+		return PTR_ERR(kirin_pcie->apb_base);
+
+	phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+	kirin_pcie->phy_base = devm_ioremap_resource(dev, phy);
+	if (IS_ERR(kirin_pcie->phy_base))
+		return PTR_ERR(kirin_pcie->phy_base);
+
+	dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi);
+	if (IS_ERR(kirin_pcie->pci->dbi_base))
+		return PTR_ERR(kirin_pcie->pci->dbi_base);
+
+	kirin_pcie->crgctrl =
+		syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
+	if (IS_ERR(kirin_pcie->crgctrl))
+		return PTR_ERR(kirin_pcie->crgctrl);
+
+	kirin_pcie->sysctrl =
+		syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
+	if (IS_ERR(kirin_pcie->sysctrl))
+		return PTR_ERR(kirin_pcie->sysctrl);
+
+	return 0;
+}
+
+static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie)
+{
+	struct device *dev = kirin_pcie->pci->dev;
+	u32 reg_val;
+
+	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
+	reg_val &= ~PHY_REF_PAD_BIT;
+	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
+
+	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0);
+	reg_val &= ~PHY_PWR_DOWN_BIT;
+	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0);
+	usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);
+
+	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
+	reg_val &= ~PHY_RST_ACK_BIT;
+	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
+
+	usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
+	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
+	if (reg_val & PIPE_CLK_STABLE) {
+		dev_err(dev, "PIPE clk is not stable\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie)
+{
+	u32 val;
+
+	regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
+	val |= PCIE_DEBOUNCE_PARAM;
+	val &= ~PCIE_OE_BYPASS;
+	regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
+}
+
+static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable)
+{
+	int ret = 0;
+
+	if (!enable)
+		goto close_clk;
+
+	ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(kirin_pcie->phy_ref_clk);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(kirin_pcie->apb_sys_clk);
+	if (ret)
+		goto apb_sys_fail;
+
+	ret = clk_prepare_enable(kirin_pcie->apb_phy_clk);
+	if (ret)
+		goto apb_phy_fail;
+
+	ret = clk_prepare_enable(kirin_pcie->pcie_aclk);
+	if (ret)
+		goto aclk_fail;
+
+	ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk);
+	if (ret)
+		goto aux_clk_fail;
+
+	return 0;
+
+close_clk:
+	clk_disable_unprepare(kirin_pcie->pcie_aux_clk);
+aux_clk_fail:
+	clk_disable_unprepare(kirin_pcie->pcie_aclk);
+aclk_fail:
+	clk_disable_unprepare(kirin_pcie->apb_phy_clk);
+apb_phy_fail:
+	clk_disable_unprepare(kirin_pcie->apb_sys_clk);
+apb_sys_fail:
+	clk_disable_unprepare(kirin_pcie->phy_ref_clk);
+
+	return ret;
+}
+
+static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie)
+{
+	int ret;
+
+	/* Power supply for Host */
+	regmap_write(kirin_pcie->sysctrl,
+		     SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
+	usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);
+	kirin_pcie_oe_enable(kirin_pcie);
+
+	ret = kirin_pcie_clk_ctrl(kirin_pcie, true);
+	if (ret)
+		return ret;
+
+	/* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
+	regmap_write(kirin_pcie->sysctrl,
+		     SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
+	regmap_write(kirin_pcie->crgctrl,
+		     CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT);
+	regmap_write(kirin_pcie->sysctrl,
+		     SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT);
+
+	ret = kirin_pcie_phy_init(kirin_pcie);
+	if (ret)
+		goto close_clk;
+
+	/* perst assert Endpoint */
+	if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) {
+		usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
+		ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1);
+		if (ret)
+			goto close_clk;
+		usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
+
+		return 0;
+	}
+
+close_clk:
+	kirin_pcie_clk_ctrl(kirin_pcie, false);
+	return ret;
+}
+
+static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
+					   bool on)
+{
+	u32 val;
+
+	val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR);
+	if (on)
+		val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+	kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR);
+}
+
+static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
+					   bool on)
+{
+	u32 val;
+
+	val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR);
+	if (on)
+		val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+	kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR);
+}
+
+static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
+				  int where, int size, u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	int ret;
+
+	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
+	ret = dw_pcie_read(pci->dbi_base + where, size, val);
+	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+
+	return ret;
+}
+
+static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
+				  int where, int size, u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	int ret;
+
+	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
+	ret = dw_pcie_write(pci->dbi_base + where, size, val);
+	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+
+	return ret;
+}
+
+static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+			       u32 reg, size_t size)
+{
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	u32 ret;
+
+	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
+	dw_pcie_read(base + reg, size, &ret);
+	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+
+	return ret;
+}
+
+static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+				 u32 reg, size_t size, u32 val)
+{
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+
+	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
+	dw_pcie_write(base + reg, size, val);
+	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+}
+
+static int kirin_pcie_link_up(struct dw_pcie *pci)
+{
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
+
+	if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
+		return 1;
+
+	return 0;
+}
+
+static int kirin_pcie_establish_link(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	struct device *dev = kirin_pcie->pci->dev;
+	int count = 0;
+
+	if (kirin_pcie_link_up(pci))
+		return 0;
+
+	dw_pcie_setup_rc(pp);
+
+	/* assert LTSSM enable */
+	kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
+			      PCIE_APP_LTSSM_ENABLE);
+
+	/* check if the link is up or not */
+	while (!kirin_pcie_link_up(pci)) {
+		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+		count++;
+		if (count == 1000) {
+			dev_err(dev, "Link Fail\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void kirin_pcie_host_init(struct pcie_port *pp)
+{
+	kirin_pcie_establish_link(pp);
+}
+
+static struct dw_pcie_ops kirin_dw_pcie_ops = {
+	.read_dbi = kirin_pcie_read_dbi,
+	.write_dbi = kirin_pcie_write_dbi,
+	.link_up = kirin_pcie_link_up,
+};
+
+static struct dw_pcie_host_ops kirin_pcie_host_ops = {
+	.rd_own_conf = kirin_pcie_rd_own_conf,
+	.wr_own_conf = kirin_pcie_wr_own_conf,
+	.host_init = kirin_pcie_host_init,
+};
+
+static int __init kirin_add_pcie_port(struct dw_pcie *pci,
+				      struct platform_device *pdev)
+{
+	pci->pp.ops = &kirin_pcie_host_ops;
+
+	return dw_pcie_host_init(&pci->pp);
+}
+
+static int kirin_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct kirin_pcie *kirin_pcie;
+	struct dw_pcie *pci;
+	int ret;
+
+	if (!dev->of_node) {
+		dev_err(dev, "NULL node\n");
+		return -EINVAL;
+	}
+
+	kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
+	if (!kirin_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &kirin_dw_pcie_ops;
+	kirin_pcie->pci = pci;
+
+	ret = kirin_pcie_get_clk(kirin_pcie, pdev);
+	if (ret)
+		return ret;
+
+	ret = kirin_pcie_get_resource(kirin_pcie, pdev);
+	if (ret)
+		return ret;
+
+	kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
+						      "reset-gpio", 0);
+	if (kirin_pcie->gpio_id_reset < 0)
+		return -ENODEV;
+
+	ret = kirin_pcie_power_on(kirin_pcie);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, kirin_pcie);
+
+	return kirin_add_pcie_port(pci, pdev);
+}
+
+static const struct of_device_id kirin_pcie_match[] = {
+	{ .compatible = "hisilicon,kirin960-pcie" },
+	{},
+};
+
+struct platform_driver kirin_pcie_driver = {
+	.probe			= kirin_pcie_probe,
+	.driver			= {
+		.name			= "kirin-pcie",
+		.of_match_table = kirin_pcie_match,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(kirin_pcie_driver);
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 5bf23d432fdb..68c5f2ab5bc8 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -51,6 +51,12 @@
 #define PCIE20_ELBI_SYS_CTRL			0x04
 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)
 
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
+#define CFG_BRIDGE_SB_INIT			BIT(0)
+
 #define PCIE20_CAP				0x70
 
 #define PERST_DELAY_US				1000
@@ -86,10 +92,29 @@ struct qcom_pcie_resources_v2 {
 	struct clk *pipe_clk;
 };
 
+struct qcom_pcie_resources_v3 {
+	struct clk *aux_clk;
+	struct clk *master_clk;
+	struct clk *slave_clk;
+	struct reset_control *axi_m_reset;
+	struct reset_control *axi_s_reset;
+	struct reset_control *pipe_reset;
+	struct reset_control *axi_m_vmid_reset;
+	struct reset_control *axi_s_xpu_reset;
+	struct reset_control *parf_reset;
+	struct reset_control *phy_reset;
+	struct reset_control *axi_m_sticky_reset;
+	struct reset_control *pipe_sticky_reset;
+	struct reset_control *pwr_reset;
+	struct reset_control *ahb_reset;
+	struct reset_control *phy_ahb_reset;
+};
+
 union qcom_pcie_resources {
 	struct qcom_pcie_resources_v0 v0;
 	struct qcom_pcie_resources_v1 v1;
 	struct qcom_pcie_resources_v2 v2;
+	struct qcom_pcie_resources_v3 v3;
 };
 
 struct qcom_pcie;
@@ -133,26 +158,6 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
 	return dw_handle_msi_irq(pp);
 }
 
-static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
-{
-	u32 val;
-
-	/* enable link training */
-	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
-	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
-	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
-}
-
-static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
-{
-	u32 val;
-
-	/* enable link training */
-	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
-	val |= BIT(8);
-	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
-}
-
 static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
 {
 	struct dw_pcie *pci = pcie->pci;
@@ -167,6 +172,16 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
 	return dw_pcie_wait_for_link(pci);
 }
 
+static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
+{
+	u32 val;
+
+	/* enable link training */
+	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
+	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+}
+
 static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
@@ -217,36 +232,6 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
 	return PTR_ERR_OR_ZERO(res->phy_reset);
 }
 
-static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
-{
-	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
-	struct dw_pcie *pci = pcie->pci;
-	struct device *dev = pci->dev;
-
-	res->vdda = devm_regulator_get(dev, "vdda");
-	if (IS_ERR(res->vdda))
-		return PTR_ERR(res->vdda);
-
-	res->iface = devm_clk_get(dev, "iface");
-	if (IS_ERR(res->iface))
-		return PTR_ERR(res->iface);
-
-	res->aux = devm_clk_get(dev, "aux");
-	if (IS_ERR(res->aux))
-		return PTR_ERR(res->aux);
-
-	res->master_bus = devm_clk_get(dev, "master_bus");
-	if (IS_ERR(res->master_bus))
-		return PTR_ERR(res->master_bus);
-
-	res->slave_bus = devm_clk_get(dev, "slave_bus");
-	if (IS_ERR(res->slave_bus))
-		return PTR_ERR(res->slave_bus);
-
-	res->core = devm_reset_control_get(dev, "core");
-	return PTR_ERR_OR_ZERO(res->core);
-}
-
 static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
@@ -357,6 +342,13 @@ static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
 	/* wait for clock acquisition */
 	usleep_range(1000, 1500);
 
+
+	/* Set the Max TLP size to 2K, instead of using default of 4K */
+	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
+	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
+	writel(CFG_BRIDGE_SB_INIT,
+	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
+
 	return 0;
 
 err_deassert_ahb:
@@ -375,6 +367,36 @@ err_refclk:
 	return ret;
 }
 
+static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+
+	res->vdda = devm_regulator_get(dev, "vdda");
+	if (IS_ERR(res->vdda))
+		return PTR_ERR(res->vdda);
+
+	res->iface = devm_clk_get(dev, "iface");
+	if (IS_ERR(res->iface))
+		return PTR_ERR(res->iface);
+
+	res->aux = devm_clk_get(dev, "aux");
+	if (IS_ERR(res->aux))
+		return PTR_ERR(res->aux);
+
+	res->master_bus = devm_clk_get(dev, "master_bus");
+	if (IS_ERR(res->master_bus))
+		return PTR_ERR(res->master_bus);
+
+	res->slave_bus = devm_clk_get(dev, "slave_bus");
+	if (IS_ERR(res->slave_bus))
+		return PTR_ERR(res->slave_bus);
+
+	res->core = devm_reset_control_get(dev, "core");
+	return PTR_ERR_OR_ZERO(res->core);
+}
+
 static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
@@ -455,6 +477,16 @@ err_res:
 	return ret;
 }
 
+static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
+{
+	u32 val;
+
+	/* enable link training */
+	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
+	val |= BIT(8);
+	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+}
+
 static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
@@ -481,6 +513,17 @@ static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
 	return PTR_ERR_OR_ZERO(res->pipe_clk);
 }
 
+static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+
+	clk_disable_unprepare(res->pipe_clk);
+	clk_disable_unprepare(res->slave_clk);
+	clk_disable_unprepare(res->master_clk);
+	clk_disable_unprepare(res->cfg_clk);
+	clk_disable_unprepare(res->aux_clk);
+}
+
 static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
@@ -562,22 +605,290 @@ static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
 	return 0;
 }
 
-static int qcom_pcie_link_up(struct dw_pcie *pci)
+static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie)
 {
-	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+	struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
 
-	return !!(val & PCI_EXP_LNKSTA_DLLLA);
+	res->aux_clk = devm_clk_get(dev, "aux");
+	if (IS_ERR(res->aux_clk))
+		return PTR_ERR(res->aux_clk);
+
+	res->master_clk = devm_clk_get(dev, "master_bus");
+	if (IS_ERR(res->master_clk))
+		return PTR_ERR(res->master_clk);
+
+	res->slave_clk = devm_clk_get(dev, "slave_bus");
+	if (IS_ERR(res->slave_clk))
+		return PTR_ERR(res->slave_clk);
+
+	res->axi_m_reset = devm_reset_control_get(dev, "axi_m");
+	if (IS_ERR(res->axi_m_reset))
+		return PTR_ERR(res->axi_m_reset);
+
+	res->axi_s_reset = devm_reset_control_get(dev, "axi_s");
+	if (IS_ERR(res->axi_s_reset))
+		return PTR_ERR(res->axi_s_reset);
+
+	res->pipe_reset = devm_reset_control_get(dev, "pipe");
+	if (IS_ERR(res->pipe_reset))
+		return PTR_ERR(res->pipe_reset);
+
+	res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid");
+	if (IS_ERR(res->axi_m_vmid_reset))
+		return PTR_ERR(res->axi_m_vmid_reset);
+
+	res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu");
+	if (IS_ERR(res->axi_s_xpu_reset))
+		return PTR_ERR(res->axi_s_xpu_reset);
+
+	res->parf_reset = devm_reset_control_get(dev, "parf");
+	if (IS_ERR(res->parf_reset))
+		return PTR_ERR(res->parf_reset);
+
+	res->phy_reset = devm_reset_control_get(dev, "phy");
+	if (IS_ERR(res->phy_reset))
+		return PTR_ERR(res->phy_reset);
+
+	res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky");
+	if (IS_ERR(res->axi_m_sticky_reset))
+		return PTR_ERR(res->axi_m_sticky_reset);
+
+	res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky");
+	if (IS_ERR(res->pipe_sticky_reset))
+		return PTR_ERR(res->pipe_sticky_reset);
+
+	res->pwr_reset = devm_reset_control_get(dev, "pwr");
+	if (IS_ERR(res->pwr_reset))
+		return PTR_ERR(res->pwr_reset);
+
+	res->ahb_reset = devm_reset_control_get(dev, "ahb");
+	if (IS_ERR(res->ahb_reset))
+		return PTR_ERR(res->ahb_reset);
+
+	res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb");
+	if (IS_ERR(res->phy_ahb_reset))
+		return PTR_ERR(res->phy_ahb_reset);
+
+	return 0;
 }
 
-static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
+static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie)
 {
-	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
-
-	clk_disable_unprepare(res->pipe_clk);
+	struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
+
+	reset_control_assert(res->axi_m_reset);
+	reset_control_assert(res->axi_s_reset);
+	reset_control_assert(res->pipe_reset);
+	reset_control_assert(res->pipe_sticky_reset);
+	reset_control_assert(res->phy_reset);
+	reset_control_assert(res->phy_ahb_reset);
+	reset_control_assert(res->axi_m_sticky_reset);
+	reset_control_assert(res->pwr_reset);
+	reset_control_assert(res->ahb_reset);
+	clk_disable_unprepare(res->aux_clk);
+	clk_disable_unprepare(res->master_clk);
 	clk_disable_unprepare(res->slave_clk);
+}
+
+static int qcom_pcie_init_v3(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	int ret;
+
+	ret = reset_control_assert(res->axi_m_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert axi master reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->axi_s_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert axi slave reset\n");
+		return ret;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_assert(res->pipe_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert pipe reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->pipe_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert pipe sticky reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->phy_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert phy reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->phy_ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert phy ahb reset\n");
+		return ret;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_assert(res->axi_m_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert axi master sticky reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->pwr_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert power reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert ahb reset\n");
+		return ret;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_deassert(res->phy_ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert phy ahb reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(res->phy_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert phy reset\n");
+		goto err_rst_phy;
+	}
+
+	ret = reset_control_deassert(res->pipe_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert pipe reset\n");
+		goto err_rst_pipe;
+	}
+
+	ret = reset_control_deassert(res->pipe_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert pipe sticky reset\n");
+		goto err_rst_pipe_sticky;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_deassert(res->axi_m_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi master reset\n");
+		goto err_rst_axi_m;
+	}
+
+	ret = reset_control_deassert(res->axi_m_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi master sticky reset\n");
+		goto err_rst_axi_m_sticky;
+	}
+
+	ret = reset_control_deassert(res->axi_s_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi slave reset\n");
+		goto err_rst_axi_s;
+	}
+
+	ret = reset_control_deassert(res->pwr_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert power reset\n");
+		goto err_rst_pwr;
+	}
+
+	ret = reset_control_deassert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert ahb reset\n");
+		goto err_rst_ahb;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = clk_prepare_enable(res->aux_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable iface clock\n");
+		goto err_clk_aux;
+	}
+
+	ret = clk_prepare_enable(res->master_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable core clock\n");
+		goto err_clk_axi_m;
+	}
+
+	ret = clk_prepare_enable(res->slave_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable phy clock\n");
+		goto err_clk_axi_s;
+	}
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= !BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	/* MAC PHY_POWERDOWN MUX DISABLE  */
+	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+	val &= ~BIT(29);
+	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+	val |= BIT(4);
+	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+	val |= BIT(31);
+	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+	return 0;
+
+err_clk_axi_s:
 	clk_disable_unprepare(res->master_clk);
-	clk_disable_unprepare(res->cfg_clk);
+err_clk_axi_m:
 	clk_disable_unprepare(res->aux_clk);
+err_clk_aux:
+	reset_control_assert(res->ahb_reset);
+err_rst_ahb:
+	reset_control_assert(res->pwr_reset);
+err_rst_pwr:
+	reset_control_assert(res->axi_s_reset);
+err_rst_axi_s:
+	reset_control_assert(res->axi_m_sticky_reset);
+err_rst_axi_m_sticky:
+	reset_control_assert(res->axi_m_reset);
+err_rst_axi_m:
+	reset_control_assert(res->pipe_sticky_reset);
+err_rst_pipe_sticky:
+	reset_control_assert(res->pipe_reset);
+err_rst_pipe:
+	reset_control_assert(res->phy_reset);
+err_rst_phy:
+	reset_control_assert(res->phy_ahb_reset);
+	return ret;
+}
+
+static int qcom_pcie_link_up(struct dw_pcie *pci)
+{
+	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+
+	return !!(val & PCI_EXP_LNKSTA_DLLLA);
 }
 
 static void qcom_pcie_host_init(struct pcie_port *pp)
@@ -634,7 +945,7 @@ static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
 	return dw_pcie_read(pci->dbi_base + where, size, val);
 }
 
-static struct dw_pcie_host_ops qcom_pcie_dw_ops = {
+static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
 	.host_init = qcom_pcie_host_init,
 	.rd_own_conf = qcom_pcie_rd_own_conf,
 };
@@ -665,6 +976,13 @@ static const struct dw_pcie_ops dw_pcie_ops = {
 	.link_up = qcom_pcie_link_up,
 };
 
+static const struct qcom_pcie_ops ops_v3 = {
+	.get_resources = qcom_pcie_get_resources_v3,
+	.init = qcom_pcie_init_v3,
+	.deinit = qcom_pcie_deinit_v3,
+	.ltssm_enable = qcom_pcie_v2_ltssm_enable,
+};
+
 static int qcom_pcie_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -727,7 +1045,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 
 		ret = devm_request_irq(dev, pp->msi_irq,
 				       qcom_pcie_msi_irq_handler,
-				       IRQF_SHARED, "qcom-pcie-msi", pp);
+				       IRQF_SHARED | IRQF_NO_THREAD,
+				       "qcom-pcie-msi", pp);
 		if (ret) {
 			dev_err(dev, "cannot request msi irq\n");
 			return ret;
@@ -754,6 +1073,7 @@ static const struct of_device_id qcom_pcie_match[] = {
 	{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
 	{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
 	{ .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
+	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 },
 	{ }
 };
 
diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c
index 8ff36b3dbbdf..80897291e0fb 100644
--- a/drivers/pci/dwc/pcie-spear13xx.c
+++ b/drivers/pci/dwc/pcie-spear13xx.c
@@ -186,7 +186,7 @@ static void spear13xx_pcie_host_init(struct pcie_port *pp)
 	spear13xx_pcie_enable_interrupts(spear13xx_pcie);
 }
 
-static struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
+static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
 	.host_init = spear13xx_pcie_host_init,
 };
 
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 7f47cd5e10a5..89d61c2cbfaa 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -180,6 +180,31 @@ config PCIE_ROCKCHIP
 	  There is 1 internal PCIe port available to support GEN2 with
 	  4 slots.
 
+config PCIE_MEDIATEK
+	bool "MediaTek PCIe controller"
+	depends on ARM && (ARCH_MEDIATEK || COMPILE_TEST)
+	depends on OF
+	depends on PCI
+	select PCIEPORTBUS
+	help
+	  Say Y here if you want to enable PCIe controller support on
+	  MT7623 series SoCs.  There is one single root complex with 3 root
+	  ports available.  Each port supports Gen2 lane x1.
+
+config PCIE_TANGO_SMP8759
+	bool "Tango SMP8759 PCIe controller (DANGEROUS)"
+	depends on ARCH_TANGO && PCI_MSI && OF
+	depends on BROKEN
+	select PCI_HOST_COMMON
+	help
+	  Say Y here to enable PCIe controller support for Sigma Designs
+	  Tango SMP8759-based systems.
+
+	  Note: The SMP8759 controller multiplexes PCI config and MMIO
+	  accesses, and Linux doesn't provide a way to serialize them.
+	  This can lead to data corruption if drivers perform concurrent
+	  config and MMIO accesses.
+
 config VMD
 	depends on PCI_MSI && X86_64 && SRCU
 	tristate "Intel Volume Management Device Driver"
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index cab879578003..12382785e02a 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -18,6 +18,8 @@ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
 obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
 obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
 obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
+obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
 obj-$(CONFIG_VMD) += vmd.o
 
 # The following drivers are for devices that use the generic ACPI
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 37d0bcd31f8a..5fb9b620ac78 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -886,12 +886,14 @@ static int advk_pcie_probe(struct platform_device *pdev)
 	struct advk_pcie *pcie;
 	struct resource *res;
 	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
 	int ret, irq;
 
-	pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
-	if (!pcie)
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
+	if (!bridge)
 		return -ENOMEM;
 
+	pcie = pci_host_bridge_priv(bridge);
 	pcie->pdev = pdev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -929,14 +931,21 @@ static int advk_pcie_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	bus = pci_scan_root_bus(dev, 0, &advk_pcie_ops,
-				pcie, &pcie->resources);
-	if (!bus) {
+	list_splice_init(&pcie->resources, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = pcie;
+	bridge->busnr = 0;
+	bridge->ops = &advk_pcie_ops;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0) {
 		advk_pcie_remove_msi_irq_domain(pcie);
 		advk_pcie_remove_irq_domain(pcie);
-		return -ENOMEM;
+		return ret;
 	}
 
+	bus = bridge->bus;
+
 	pci_bus_assign_resources(bus);
 
 	list_for_each_entry(child, &bus->children, node)
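
Most of the host-bridge conversions in this series (aardvark above, and several drivers further down) follow the same shape: allocate the bridge and the driver-private data together, fill in the bridge fields including map_irq/swizzle_irq, and let pci_scan_root_bus_bridge() do the scan. The sketch below is an illustrative composite, not code from any one driver; the foo_* names and the elided resource/hardware setup are placeholders.

#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

/* Hypothetical driver-private state; real drivers keep their own fields. */
struct foo_pcie {
	void __iomem *base;
};

static struct pci_ops foo_pcie_ops;	/* config accessors, elided */

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct foo_pcie *pcie;
	LIST_HEAD(res);
	int ret;

	/* Private data is allocated together with the bridge and freed with it. */
	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;
	pcie = pci_host_bridge_priv(bridge);

	/* ... parse DT ranges into &res, map registers, init the hardware ... */

	list_splice_init(&res, &bridge->windows);
	bridge->dev.parent = dev;
	bridge->sysdata = pcie;
	bridge->busnr = 0;
	bridge->ops = &foo_pcie_ops;
	/* IRQs are mapped by the core at scan time, replacing pci_fixup_irqs(). */
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret < 0)
		return ret;

	pci_bus_assign_resources(bridge->bus);
	pci_bus_add_devices(bridge->bus);
	return 0;
}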
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
index d26501c4145a..5162dffc102b 100644
--- a/drivers/pci/host/pci-ftpci100.c
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -25,6 +25,7 @@
 #include <linux/irqchip/chained_irq.h>
 #include <linux/bitops.h>
 #include <linux/irq.h>
+#include <linux/clk.h>
 
 /*
  * Special configuration registers directly in the first few words
@@ -37,6 +38,7 @@
 #define PCI_CONFIG	0x28 /* PCI configuration command register */
 #define PCI_DATA	0x2C
 
+#define FARADAY_PCI_STATUS_CMD		0x04 /* Status and command */
 #define FARADAY_PCI_PMC			0x40 /* Power management control */
 #define FARADAY_PCI_PMCSR		0x44 /* Power management status */
 #define FARADAY_PCI_CTRL1		0x48 /* Control register 1 */
@@ -45,6 +47,8 @@
 #define FARADAY_PCI_MEM2_BASE_SIZE	0x54 /* Memory base and size #2 */
 #define FARADAY_PCI_MEM3_BASE_SIZE	0x58 /* Memory base and size #3 */
 
+#define PCI_STATUS_66MHZ_CAPABLE	BIT(21)
+
 /* Bits 31..28 gives INTD..INTA status */
 #define PCI_CTRL2_INTSTS_SHIFT		28
 #define PCI_CTRL2_INTMASK_CMDERR	BIT(27)
@@ -117,6 +121,7 @@ struct faraday_pci {
 	void __iomem *base;
 	struct irq_domain *irqdomain;
 	struct pci_bus *bus;
+	struct clk *bus_clk;
 };
 
 static int faraday_res_to_memcfg(resource_size_t mem_base,
@@ -178,12 +183,11 @@ static int faraday_res_to_memcfg(resource_size_t mem_base,
 	return 0;
 }
 
-static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
-				   int config, int size, u32 *value)
+static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number,
+				       unsigned int fn, int config, int size,
+				       u32 *value)
 {
-	struct faraday_pci *p = bus->sysdata;
-
-	writel(PCI_CONF_BUS(bus->number) |
+	writel(PCI_CONF_BUS(bus_number) |
 			PCI_CONF_DEVICE(PCI_SLOT(fn)) |
 			PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
 			PCI_CONF_WHERE(config) |
@@ -197,24 +201,28 @@ static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
 	else if (size == 2)
 		*value = (*value >> (8 * (config & 3))) & 0xFFFF;
 
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
+				   int config, int size, u32 *value)
+{
+	struct faraday_pci *p = bus->sysdata;
+
 	dev_dbg(&bus->dev,
 		"[read]  slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
 		PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value);
 
-	return PCIBIOS_SUCCESSFUL;
+	return faraday_raw_pci_read_config(p, bus->number, fn, config, size, value);
 }
 
-static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
-				    int config, int size, u32 value)
+static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number,
+					 unsigned int fn, int config, int size,
+					 u32 value)
 {
-	struct faraday_pci *p = bus->sysdata;
 	int ret = PCIBIOS_SUCCESSFUL;
 
-	dev_dbg(&bus->dev,
-		"[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
-		PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
-
-	writel(PCI_CONF_BUS(bus->number) |
+	writel(PCI_CONF_BUS(bus_number) |
 			PCI_CONF_DEVICE(PCI_SLOT(fn)) |
 			PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
 			PCI_CONF_WHERE(config) |
@@ -238,6 +246,19 @@ static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
 	return ret;
 }
 
+static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
+				    int config, int size, u32 value)
+{
+	struct faraday_pci *p = bus->sysdata;
+
+	dev_dbg(&bus->dev,
+		"[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+		PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
+
+	return faraday_raw_pci_write_config(p, bus->number, fn, config, size,
+					    value);
+}
+
 static struct pci_ops faraday_pci_ops = {
 	.read	= faraday_pci_read_config,
 	.write	= faraday_pci_write_config,
@@ -248,10 +269,10 @@ static void faraday_pci_ack_irq(struct irq_data *d)
 	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
 	unsigned int reg;
 
-	faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
 	reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
 	reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT);
-	faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
 }
 
 static void faraday_pci_mask_irq(struct irq_data *d)
@@ -259,10 +280,10 @@ static void faraday_pci_mask_irq(struct irq_data *d)
 	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
 	unsigned int reg;
 
-	faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
 	reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT)
 		 | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT));
-	faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
 }
 
 static void faraday_pci_unmask_irq(struct irq_data *d)
@@ -270,10 +291,10 @@ static void faraday_pci_unmask_irq(struct irq_data *d)
 	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
 	unsigned int reg;
 
-	faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
 	reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
 	reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT);
-	faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
 }
 
 static void faraday_pci_irq_handler(struct irq_desc *desc)
@@ -282,7 +303,7 @@ static void faraday_pci_irq_handler(struct irq_desc *desc)
 	struct irq_chip *irqchip = irq_desc_get_chip(desc);
 	unsigned int irq_stat, reg, i;
 
-	faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
 	irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT;
 
 	chained_irq_enter(irqchip, desc);
@@ -403,8 +424,8 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
 		dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n",
 			 i + 1, range.pci_addr, end, val);
 		if (i <= 2) {
-			faraday_pci_write_config(p->bus, 0, confreg[i],
-						 4, val);
+			faraday_raw_pci_write_config(p, 0, 0, confreg[i],
+						     4, val);
 		} else {
 			dev_err(dev, "ignore extraneous dma-range %d\n", i);
 			break;
@@ -428,11 +449,14 @@ static int faraday_pci_probe(struct platform_device *pdev)
 	struct resource *mem;
 	struct resource *io;
 	struct pci_host_bridge *host;
+	struct clk *clk;
+	unsigned char max_bus_speed = PCI_SPEED_33MHz;
+	unsigned char cur_bus_speed = PCI_SPEED_33MHz;
 	int ret;
 	u32 val;
 	LIST_HEAD(res);
 
-	host = pci_alloc_host_bridge(sizeof(*p));
+	host = devm_pci_alloc_host_bridge(dev, sizeof(*p));
 	if (!host)
 		return -ENOMEM;
 
@@ -440,10 +464,30 @@ static int faraday_pci_probe(struct platform_device *pdev)
 	host->ops = &faraday_pci_ops;
 	host->busnr = 0;
 	host->msi = NULL;
+	host->map_irq = of_irq_parse_and_map_pci;
+	host->swizzle_irq = pci_common_swizzle;
 	p = pci_host_bridge_priv(host);
 	host->sysdata = p;
 	p->dev = dev;
 
+	/* Retrieve and enable the host controller clocks */
+	clk = devm_clk_get(dev, "PCLK");
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		dev_err(dev, "could not prepare PCLK\n");
+		return ret;
+	}
+	p->bus_clk = devm_clk_get(dev, "PCICLK");
+	if (IS_ERR(p->bus_clk))
+		return PTR_ERR(p->bus_clk);
+	ret = clk_prepare_enable(p->bus_clk);
+	if (ret) {
+		dev_err(dev, "could not prepare PCICLK\n");
+		return ret;
+	}
+
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	p->base = devm_ioremap_resource(dev, regs);
 	if (IS_ERR(p->base))
@@ -496,17 +540,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
 	val |= PCI_COMMAND_MEMORY;
 	val |= PCI_COMMAND_MASTER;
 	writel(val, p->base + PCI_CTRL);
-
-	list_splice_init(&res, &host->windows);
-	ret = pci_register_host_bridge(host);
-	if (ret) {
-		dev_err(dev, "failed to register host: %d\n", ret);
-		return ret;
-	}
-	p->bus = host->bus;
-
 	/* Mask and clear all interrupts */
-	faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
+	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
 	if (variant->cascaded_irq) {
 		ret = faraday_pci_setup_cascaded_irq(p);
 		if (ret) {
@@ -515,12 +550,48 @@ static int faraday_pci_probe(struct platform_device *pdev)
 		}
 	}
 
+	/* Check whether the bus clock can be geared up to 66 MHz */
+	if (!IS_ERR(p->bus_clk)) {
+		unsigned long rate;
+		u32 val;
+
+		faraday_raw_pci_read_config(p, 0, 0,
+					    FARADAY_PCI_STATUS_CMD, 4, &val);
+		rate = clk_get_rate(p->bus_clk);
+
+		if ((rate == 33000000) && (val & PCI_STATUS_66MHZ_CAPABLE)) {
+			dev_info(dev, "33MHz bus is 66MHz capable\n");
+			max_bus_speed = PCI_SPEED_66MHz;
+			ret = clk_set_rate(p->bus_clk, 66000000);
+			if (ret)
+				dev_err(dev, "failed to set bus clock\n");
+		} else {
+			dev_info(dev, "33MHz only bus\n");
+			max_bus_speed = PCI_SPEED_33MHz;
+		}
+
+		/* Bumping the clock may fail so read back the rate */
+		rate = clk_get_rate(p->bus_clk);
+		if (rate == 33000000)
+			cur_bus_speed = PCI_SPEED_33MHz;
+		if (rate == 66000000)
+			cur_bus_speed = PCI_SPEED_66MHz;
+	}
+
 	ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node);
 	if (ret)
 		return ret;
 
-	pci_scan_child_bus(p->bus);
-	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+	list_splice_init(&res, &host->windows);
+	ret = pci_scan_root_bus_bridge(host);
+	if (ret) {
+		dev_err(dev, "failed to scan host: %d\n", ret);
+		return ret;
+	}
+	p->bus = host->bus;
+	p->bus->max_bus_speed = max_bus_speed;
+	p->bus->cur_bus_speed = cur_bus_speed;
+
 	pci_bus_assign_resources(p->bus);
 	pci_bus_add_devices(p->bus);
 	pci_free_resource_list(&res);
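
The gear-up logic above only bumps PCICLK when the clock currently runs at 33 MHz and the bridge's status register advertises 66 MHz capability. A stand-alone sketch of just that decision, not kernel code; pick_bus_rate() is a hypothetical helper introduced for illustration.

#include <stdio.h>
#include <stdint.h>

#define STATUS_66MHZ_CAPABLE	(1u << 21)	/* mirrors PCI_STATUS_66MHZ_CAPABLE */

/* Return the bus rate to request given the current rate and status register. */
static unsigned long pick_bus_rate(unsigned long cur_rate, uint32_t status_cmd)
{
	if (cur_rate == 33000000 && (status_cmd & STATUS_66MHZ_CAPABLE))
		return 66000000;	/* capable: ask for the faster rate */
	return cur_rate;		/* otherwise keep the current rate */
}

int main(void)
{
	printf("%lu\n", pick_bus_rate(33000000, STATUS_66MHZ_CAPABLE));	/* 66000000 */
	printf("%lu\n", pick_bus_rate(33000000, 0));			/* 33000000 */
	return 0;
}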
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index e9a53bae1c25..44a47d4f0b8f 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -117,8 +117,14 @@ int pci_host_common_probe(struct platform_device *pdev,
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
 	struct pci_config_window *cfg;
 	struct list_head resources;
+	int ret;
+
+	bridge = devm_pci_alloc_host_bridge(dev, 0);
+	if (!bridge)
+		return -ENOMEM;
 
 	type = of_get_property(np, "device_type", NULL);
 	if (!type || strcmp(type, "pci")) {
@@ -138,16 +144,21 @@ int pci_host_common_probe(struct platform_device *pdev,
 	if (!pci_has_flag(PCI_PROBE_ONLY))
 		pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
 
-	bus = pci_scan_root_bus(dev, cfg->busr.start, &ops->pci_ops, cfg,
-				&resources);
-	if (!bus) {
-		dev_err(dev, "Scanning rootbus failed");
-		return -ENODEV;
+	list_splice_init(&resources, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = cfg;
+	bridge->busnr = cfg->busr.start;
+	bridge->ops = &ops->pci_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0) {
+		dev_err(dev, "Scanning root bridge failed");
+		return ret;
 	}
 
-#ifdef CONFIG_ARM
-	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
-#endif
+	bus = bridge->bus;
 
 	/*
 	 * We insert PCI resources into the iomem_resource and
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 84936383e269..415dcc69a502 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -64,22 +64,39 @@
  * major version.
  */
 
-#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (major)))
+#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
 #define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
 #define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)
 
-enum {
-	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),
-	PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1
+enum pci_protocol_version_t {
+	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
+	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
 };
 
 #define CPU_AFFINITY_ALL	-1ULL
+
+/*
+ * Supported protocol versions in the order of probing - highest go
+ * first.
+ */
+static enum pci_protocol_version_t pci_protocol_versions[] = {
+	PCI_PROTOCOL_VERSION_1_2,
+	PCI_PROTOCOL_VERSION_1_1,
+};
+
+/*
+ * Protocol version negotiated by hv_pci_protocol_negotiation().
+ */
+static enum pci_protocol_version_t pci_protocol_version;
+
 #define PCI_CONFIG_MMIO_LENGTH	0x2000
 #define CFG_PAGE_OFFSET 0x1000
 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
 
 #define MAX_SUPPORTED_MSI_MESSAGES 0x400
 
+#define STATUS_REVISION_MISMATCH 0xC0000059
+
 /*
  * Message Types
  */
@@ -109,6 +126,9 @@ enum pci_message_type {
 	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
 	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
 	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
+	PCI_RESOURCES_ASSIGNED2		= PCI_MESSAGE_BASE + 0x16,
+	PCI_CREATE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x17,
+	PCI_DELETE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x18, /* unused */
 	PCI_MESSAGE_MAXIMUM
 };
 
@@ -179,6 +199,30 @@ struct hv_msi_desc {
 } __packed;
 
 /**
+ * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
+ * @vector:		IDT entry
+ * @delivery_mode:	As defined in Intel's Programmer's
+ *			Reference Manual, Volume 3, Chapter 8.
+ * @vector_count:	Number of contiguous entries in the
+ *			Interrupt Descriptor Table that are
+ *			occupied by this Message-Signaled
+ *			Interrupt. For "MSI", as first defined
+ *			in PCI 2.2, this can be between 1 and
+ *			32. For "MSI-X," as first defined in PCI
+ *			3.0, this must be 1, as each MSI-X table
+ *			entry would have its own descriptor.
+ * @processor_count:	number of bits enabled in array.
+ * @processor_array:	All the target virtual processors.
+ */
+struct hv_msi_desc2 {
+	u8	vector;
+	u8	delivery_mode;
+	u16	vector_count;
+	u16	processor_count;
+	u16	processor_array[32];
+} __packed;
+
+/**
  * struct tran_int_desc
  * @reserved:		unused, padding
  * @vector_count:	same as in hv_msi_desc
@@ -245,7 +289,7 @@ struct pci_packet {
 
 struct pci_version_request {
 	struct pci_message message_type;
-	enum pci_message_type protocol_version;
+	u32 protocol_version;
 } __packed;
 
 /*
@@ -294,6 +338,14 @@ struct pci_resources_assigned {
 	u32 reserved[4];
 } __packed;
 
+struct pci_resources_assigned2 {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+	u8 memory_range[0x14][6];	/* not used here */
+	u32 msi_descriptor_count;
+	u8 reserved[70];
+} __packed;
+
 struct pci_create_interrupt {
 	struct pci_message message_type;
 	union win_slot_encoding wslot;
@@ -306,6 +358,12 @@ struct pci_create_int_response {
 	struct tran_int_desc int_desc;
 } __packed;
 
+struct pci_create_interrupt2 {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+	struct hv_msi_desc2 int_desc;
+} __packed;
+
 struct pci_delete_interrupt {
 	struct pci_message message_type;
 	union win_slot_encoding wslot;
@@ -331,17 +389,42 @@ static int pci_ring_size = (4 * PAGE_SIZE);
 #define HV_PARTITION_ID_SELF		((u64)-1)
 #define HVCALL_RETARGET_INTERRUPT	0x7e
 
-struct retarget_msi_interrupt {
-	u64	partition_id;		/* use "self" */
-	u64	device_id;
+struct hv_interrupt_entry {
 	u32	source;			/* 1 for MSI(-X) */
 	u32	reserved1;
 	u32	address;
 	u32	data;
-	u64	reserved2;
+};
+
+#define HV_VP_SET_BANK_COUNT_MAX	5 /* current implementation limit */
+
+struct hv_vp_set {
+	u64	format;			/* 0 (HvGenericSetSparse4k) */
+	u64	valid_banks;
+	u64	masks[HV_VP_SET_BANK_COUNT_MAX];
+};
+
+/*
+ * flags for hv_device_interrupt_target.flags
+ */
+#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST		1
+#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET	2
+
+struct hv_device_interrupt_target {
 	u32	vector;
 	u32	flags;
-	u64	vp_mask;
+	union {
+		u64		 vp_mask;
+		struct hv_vp_set vp_set;
+	};
+};
+
+struct retarget_msi_interrupt {
+	u64	partition_id;		/* use "self" */
+	u64	device_id;
+	struct hv_interrupt_entry int_entry;
+	u64	reserved2;
+	struct hv_device_interrupt_target int_target;
 } __packed;
 
 /*
@@ -382,7 +465,10 @@ struct hv_pcibus_device {
 	struct msi_domain_info msi_info;
 	struct msi_controller msi_chip;
 	struct irq_domain *irq_domain;
+
+	/* hypercall arg, must not cross page boundary */
 	struct retarget_msi_interrupt retarget_msi_interrupt_params;
+
 	spinlock_t retarget_msi_interrupt_lock;
 };
 
@@ -476,6 +562,52 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev,
 static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
 static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
 
+
+/*
+ * Temporary CPU to vCPU mapping to address transitioning
+ * vmbus_cpu_number_to_vp_number() being migrated to
+ * hv_cpu_number_to_vp_number() in a separate patch. Once that patch
+ * has been picked up in the main line, remove this code here and use
+ * the official code.
+ */
+static struct hv_tmpcpumap
+{
+	bool initialized;
+	u32 vp_index[NR_CPUS];
+} hv_tmpcpumap;
+
+static void hv_tmpcpumap_init_cpu(void *_unused)
+{
+	int cpu = smp_processor_id();
+	u64 vp_index;
+
+	hv_get_vp_index(vp_index);
+
+	hv_tmpcpumap.vp_index[cpu] = vp_index;
+}
+
+static void hv_tmpcpumap_init(void)
+{
+	if (hv_tmpcpumap.initialized)
+		return;
+
+	memset(hv_tmpcpumap.vp_index, -1, sizeof(hv_tmpcpumap.vp_index));
+	on_each_cpu(hv_tmpcpumap_init_cpu, NULL, true);
+	hv_tmpcpumap.initialized = true;
+}
+
+/**
+ * hv_tmp_cpu_nr_to_vp_nr() - Convert Linux CPU nr to Hyper-V vCPU nr
+ *
+ * Remove once vmbus_cpu_number_to_vp_number() has been converted to
+ * hv_cpu_number_to_vp_number() and replace callers appropriately.
+ */
+static u32 hv_tmp_cpu_nr_to_vp_nr(int cpu)
+{
+	return hv_tmpcpumap.vp_index[cpu];
+}
+
+
 /**
  * devfn_to_wslot() - Convert from Linux PCI slot to Windows
  * @devfn:	The Linux representation of PCI slot
@@ -786,8 +918,11 @@ static void hv_irq_unmask(struct irq_data *data)
 	struct cpumask *dest;
 	struct pci_bus *pbus;
 	struct pci_dev *pdev;
-	int cpu;
 	unsigned long flags;
+	u32 var_size = 0;
+	int cpu_vmbus;
+	int cpu;
+	u64 res;
 
 	dest = irq_data_get_affinity_mask(data);
 	pdev = msi_desc_to_pci_dev(msi_desc);
@@ -799,23 +934,74 @@ static void hv_irq_unmask(struct irq_data *data)
 	params = &hbus->retarget_msi_interrupt_params;
 	memset(params, 0, sizeof(*params));
 	params->partition_id = HV_PARTITION_ID_SELF;
-	params->source = 1; /* MSI(-X) */
-	params->address = msi_desc->msg.address_lo;
-	params->data = msi_desc->msg.data;
+	params->int_entry.source = 1; /* MSI(-X) */
+	params->int_entry.address = msi_desc->msg.address_lo;
+	params->int_entry.data = msi_desc->msg.data;
 	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
 			   (hbus->hdev->dev_instance.b[4] << 16) |
 			   (hbus->hdev->dev_instance.b[7] << 8) |
 			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
 			   PCI_FUNC(pdev->devfn);
-	params->vector = cfg->vector;
+	params->int_target.vector = cfg->vector;
+
+	/*
+	 * Honoring apic->irq_delivery_mode set to dest_Fixed by
+	 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
+	 * spurious interrupt storm. Not doing so does not seem to have a
+	 * negative effect (yet?).
+	 */
+
+	if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+		/*
+		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
+		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
+		 * with >64 VP support.
+		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
+		 * is not sufficient for this hypercall.
+		 */
+		params->int_target.flags |=
+			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
+		params->int_target.vp_set.valid_banks =
+			(1ull << HV_VP_SET_BANK_COUNT_MAX) - 1;
+
+		/*
+		 * var-sized hypercall, var-size starts after vp_mask (thus
+		 * vp_set.format does not count, but vp_set.valid_banks does).
+		 */
+		var_size = 1 + HV_VP_SET_BANK_COUNT_MAX;
 
-	for_each_cpu_and(cpu, dest, cpu_online_mask)
-		params->vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu));
+		for_each_cpu_and(cpu, dest, cpu_online_mask) {
+			cpu_vmbus = hv_tmp_cpu_nr_to_vp_nr(cpu);
 
-	hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, params, NULL);
+			if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) {
+				dev_err(&hbus->hdev->device,
+					"CPU %d out of range", cpu_vmbus);
+				res = 1;
+				goto exit_unlock;
+			}
 
+			params->int_target.vp_set.masks[cpu_vmbus / 64] |=
+				(1ULL << (cpu_vmbus & 63));
+		}
+	} else {
+		for_each_cpu_and(cpu, dest, cpu_online_mask) {
+			params->int_target.vp_mask |=
+				(1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu));
+		}
+	}
+
+	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
+			      params, NULL);
+
+exit_unlock:
 	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
 
+	if (res) {
+		dev_err(&hbus->hdev->device,
+			"%s() failed: %#llx", __func__, res);
+		return;
+	}
+
 	pci_msi_unmask_irq(data);
 }
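
For protocol 1.2 the retarget hypercall above switches from a flat vp_mask to a sparse VP_SET: each 64-bit bank covers 64 virtual processors, so vCPU n sets bit n % 64 in bank n / 64. A stand-alone illustration of that bank math (not kernel code):

#include <stdio.h>
#include <stdint.h>

#define BANK_COUNT_MAX 5	/* mirrors HV_VP_SET_BANK_COUNT_MAX */

int main(void)
{
	uint64_t masks[BANK_COUNT_MAX] = { 0 };
	/* valid_banks marks every bank as present, as the driver does. */
	uint64_t valid_banks = (1ull << BANK_COUNT_MAX) - 1;
	int vcpus[] = { 3, 64, 130 };	/* example vCPU numbers */

	for (unsigned i = 0; i < sizeof(vcpus) / sizeof(vcpus[0]); i++) {
		int vp = vcpus[i];

		if (vp >= BANK_COUNT_MAX * 64)
			continue;	/* the driver errors out here instead */
		masks[vp / 64] |= 1ull << (vp & 63);
	}

	printf("valid_banks=%#llx\n", (unsigned long long)valid_banks);
	for (int b = 0; b < BANK_COUNT_MAX; b++)
		printf("bank %d: %#llx\n", b, (unsigned long long)masks[b]);
	return 0;
}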
 
@@ -836,6 +1022,53 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp,
 	complete(&comp_pkt->comp_pkt.host_event);
 }
 
+static u32 hv_compose_msi_req_v1(
+	struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
+	u32 slot, u8 vector)
+{
+	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
+	int_pkt->wslot.slot = slot;
+	int_pkt->int_desc.vector = vector;
+	int_pkt->int_desc.vector_count = 1;
+	int_pkt->int_desc.delivery_mode =
+		(apic->irq_delivery_mode == dest_LowestPrio) ?
+			dest_LowestPrio : dest_Fixed;
+
+	/*
+	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
+	 * hv_irq_unmask().
+	 */
+	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
+
+	return sizeof(*int_pkt);
+}
+
+static u32 hv_compose_msi_req_v2(
+	struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
+	u32 slot, u8 vector)
+{
+	int cpu;
+
+	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
+	int_pkt->wslot.slot = slot;
+	int_pkt->int_desc.vector = vector;
+	int_pkt->int_desc.vector_count = 1;
+	int_pkt->int_desc.delivery_mode =
+		(apic->irq_delivery_mode == dest_LowestPrio) ?
+			dest_LowestPrio : dest_Fixed;
+
+	/*
+	 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
+	 * by subsequent retarget in hv_irq_unmask().
+	 */
+	cpu = cpumask_first_and(affinity, cpu_online_mask);
+	int_pkt->int_desc.processor_array[0] =
+		hv_tmp_cpu_nr_to_vp_nr(cpu);
+	int_pkt->int_desc.processor_count = 1;
+
+	return sizeof(*int_pkt);
+}
+
 /**
  * hv_compose_msi_msg() - Supplies a valid MSI address/data
  * @data:	Everything about this MSI
@@ -854,15 +1087,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	struct hv_pci_dev *hpdev;
 	struct pci_bus *pbus;
 	struct pci_dev *pdev;
-	struct pci_create_interrupt *int_pkt;
 	struct compose_comp_ctxt comp;
 	struct tran_int_desc *int_desc;
-	struct cpumask *affinity;
 	struct {
-		struct pci_packet pkt;
-		u8 buffer[sizeof(struct pci_create_interrupt)];
-	} ctxt;
-	int cpu;
+		struct pci_packet pci_pkt;
+		union {
+			struct pci_create_interrupt v1;
+			struct pci_create_interrupt2 v2;
+		} int_pkts;
+	} __packed ctxt;
+
+	u32 size;
 	int ret;
 
 	pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
@@ -885,36 +1120,44 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 
 	memset(&ctxt, 0, sizeof(ctxt));
 	init_completion(&comp.comp_pkt.host_event);
-	ctxt.pkt.completion_func = hv_pci_compose_compl;
-	ctxt.pkt.compl_ctxt = &comp;
-	int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message;
-	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
-	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
-	int_pkt->int_desc.vector = cfg->vector;
-	int_pkt->int_desc.vector_count = 1;
-	int_pkt->int_desc.delivery_mode =
-		(apic->irq_delivery_mode == dest_LowestPrio) ? 1 : 0;
+	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
+	ctxt.pci_pkt.compl_ctxt = &comp;
+
+	switch (pci_protocol_version) {
+	case PCI_PROTOCOL_VERSION_1_1:
+		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
+					irq_data_get_affinity_mask(data),
+					hpdev->desc.win_slot.slot,
+					cfg->vector);
+		break;
 
-	/*
-	 * This bit doesn't have to work on machines with more than 64
-	 * processors because Hyper-V only supports 64 in a guest.
-	 */
-	affinity = irq_data_get_affinity_mask(data);
-	if (cpumask_weight(affinity) >= 32) {
-		int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
-	} else {
-		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
-			int_pkt->int_desc.cpu_mask |=
-				(1ULL << vmbus_cpu_number_to_vp_number(cpu));
-		}
+	case PCI_PROTOCOL_VERSION_1_2:
+		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
+					irq_data_get_affinity_mask(data),
+					hpdev->desc.win_slot.slot,
+					cfg->vector);
+		break;
+
+	default:
+		/* As we only negotiate protocol versions known to this driver,
+		 * this path should never be hit. However, this is not a hot
+		 * path so we print a message to aid future updates.
+		 */
+		dev_err(&hbus->hdev->device,
+			"Unexpected vPCI protocol, update driver.");
+		goto free_int_desc;
 	}
 
-	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt,
-			       sizeof(*int_pkt), (unsigned long)&ctxt.pkt,
+	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
+			       size, (unsigned long)&ctxt.pci_pkt,
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-	if (ret)
+	if (ret) {
+		dev_err(&hbus->hdev->device,
+			"Sending request for interrupt failed: 0x%x",
+			comp.comp_pkt.completion_status);
 		goto free_int_desc;
+	}
 
 	wait_for_completion(&comp.comp_pkt.host_event);
 
@@ -1513,12 +1756,12 @@ static void pci_devices_present_work(struct work_struct *work)
 		put_pcichild(hpdev, hv_pcidev_ref_initial);
 	}
 
-	switch(hbus->state) {
+	switch (hbus->state) {
 	case hv_pcibus_installed:
 		/*
-		* Tell the core to rescan bus
-		* because there may have been changes.
-		*/
+		 * Tell the core to rescan bus
+		 * because there may have been changes.
+		 */
 		pci_lock_rescan_remove();
 		pci_scan_child_bus(hbus->pci_bus);
 		pci_unlock_rescan_remove();
@@ -1800,6 +2043,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
 	struct hv_pci_compl comp_pkt;
 	struct pci_packet *pkt;
 	int ret;
+	int i;
 
 	/*
 	 * Initiate the handshake with the host and negotiate
@@ -1816,26 +2060,44 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
 	pkt->compl_ctxt = &comp_pkt;
 	version_req = (struct pci_version_request *)&pkt->message;
 	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
-	version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT;
 
-	ret = vmbus_sendpacket(hdev->channel, version_req,
-			       sizeof(struct pci_version_request),
-			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
-			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-	if (ret)
-		goto exit;
+	for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
+		version_req->protocol_version = pci_protocol_versions[i];
+		ret = vmbus_sendpacket(hdev->channel, version_req,
+				sizeof(struct pci_version_request),
+				(unsigned long)pkt, VM_PKT_DATA_INBAND,
+				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		if (ret) {
+			dev_err(&hdev->device,
+				"PCI Pass-through VSP failed sending version request: %#x",
+				ret);
+			goto exit;
+		}
 
-	wait_for_completion(&comp_pkt.host_event);
+		wait_for_completion(&comp_pkt.host_event);
 
-	if (comp_pkt.completion_status < 0) {
-		dev_err(&hdev->device,
-			"PCI Pass-through VSP failed version request %x\n",
-			comp_pkt.completion_status);
-		ret = -EPROTO;
-		goto exit;
+		if (comp_pkt.completion_status >= 0) {
+			pci_protocol_version = pci_protocol_versions[i];
+			dev_info(&hdev->device,
+				"PCI VMBus probing: Using version %#x\n",
+				pci_protocol_version);
+			goto exit;
+		}
+
+		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
+			dev_err(&hdev->device,
+				"PCI Pass-through VSP failed version request: %#x",
+				comp_pkt.completion_status);
+			ret = -EPROTO;
+			goto exit;
+		}
+
+		reinit_completion(&comp_pkt.host_event);
 	}
 
-	ret = 0;
+	dev_err(&hdev->device,
+		"PCI pass-through VSP failed to find supported version");
+	ret = -EPROTO;
 
 exit:
 	kfree(pkt);
@@ -2094,13 +2356,18 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
 {
 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
 	struct pci_resources_assigned *res_assigned;
+	struct pci_resources_assigned2 *res_assigned2;
 	struct hv_pci_compl comp_pkt;
 	struct hv_pci_dev *hpdev;
 	struct pci_packet *pkt;
+	size_t size_res;
 	u32 wslot;
 	int ret;
 
-	pkt = kmalloc(sizeof(*pkt) + sizeof(*res_assigned), GFP_KERNEL);
+	size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2)
+			? sizeof(*res_assigned) : sizeof(*res_assigned2);
+
+	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
 	if (!pkt)
 		return -ENOMEM;
 
@@ -2111,22 +2378,30 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
 		if (!hpdev)
 			continue;
 
-		memset(pkt, 0, sizeof(*pkt) + sizeof(*res_assigned));
+		memset(pkt, 0, sizeof(*pkt) + size_res);
 		init_completion(&comp_pkt.host_event);
 		pkt->completion_func = hv_pci_generic_compl;
 		pkt->compl_ctxt = &comp_pkt;
-		res_assigned = (struct pci_resources_assigned *)&pkt->message;
-		res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED;
-		res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
 
+		if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) {
+			res_assigned =
+				(struct pci_resources_assigned *)&pkt->message;
+			res_assigned->message_type.type =
+				PCI_RESOURCES_ASSIGNED;
+			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
+		} else {
+			res_assigned2 =
+				(struct pci_resources_assigned2 *)&pkt->message;
+			res_assigned2->message_type.type =
+				PCI_RESOURCES_ASSIGNED2;
+			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
+		}
 		put_pcichild(hpdev, hv_pcidev_ref_by_slot);
 
-		ret = vmbus_sendpacket(
-			hdev->channel, &pkt->message,
-			sizeof(*res_assigned),
-			(unsigned long)pkt,
-			VM_PKT_DATA_INBAND,
-			VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
+				size_res, (unsigned long)pkt,
+				VM_PKT_DATA_INBAND,
+				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 		if (ret)
 			break;
 
@@ -2204,11 +2479,19 @@ static int hv_pci_probe(struct hv_device *hdev,
 	struct hv_pcibus_device *hbus;
 	int ret;
 
-	hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
+	/*
+	 * hv_pcibus_device contains the hypercall arguments for retargeting in
+	 * hv_irq_unmask(). Those must not cross a page boundary.
+	 */
+	BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE);
+
+	hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
 	if (!hbus)
 		return -ENOMEM;
 	hbus->state = hv_pcibus_init;
 
+	hv_tmpcpumap_init();
+
 	/*
 	 * The PCI bus "domain" is what is called "segment" in ACPI and
 	 * other specs.  Pull it from the instance ID, to get something
@@ -2308,7 +2591,7 @@ free_config:
 close:
 	vmbus_close(hdev->channel);
 free_bus:
-	kfree(hbus);
+	free_page((unsigned long)hbus);
 	return ret;
 }
 
@@ -2386,7 +2669,7 @@ static int hv_pci_remove(struct hv_device *hdev)
 	irq_domain_free_fwnode(hbus->sysdata.fwnode);
 	put_hvpcibus(hbus);
 	wait_for_completion(&hbus->remove_event);
-	kfree(hbus);
+	free_page((unsigned long)hbus);
 	return 0;
 }
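
The negotiation added above packs the protocol version as major in the high 16 bits and minor in the low bits (the macro fix near the top of this file), then offers versions newest-first and falls back when the host reports STATUS_REVISION_MISMATCH. A stand-alone illustration of the encoding and the probing order (not kernel code; a numeric comparison stands in for the host's accept/reject answer):

#include <stdio.h>
#include <stdint.h>

/* Mirrors PCI_MAKE_VERSION()/PCI_MAJOR_VERSION()/PCI_MINOR_VERSION(). */
#define MAKE_VERSION(major, minor)	((uint32_t)(((major) << 16) | (minor)))
#define MAJOR_VERSION(version)		((uint32_t)(version) >> 16)
#define MINOR_VERSION(version)		((uint32_t)(version) & 0xff)

int main(void)
{
	/* Newest first, like pci_protocol_versions[]. */
	uint32_t versions[] = { MAKE_VERSION(1, 2), MAKE_VERSION(1, 1) };
	/* Pretend the host (VSP) only understands protocol 1.1. */
	uint32_t host_max = MAKE_VERSION(1, 1);

	for (unsigned i = 0; i < sizeof(versions) / sizeof(versions[0]); i++) {
		if (versions[i] > host_max) {
			/* The real driver sees STATUS_REVISION_MISMATCH here. */
			printf("host rejected %#x, trying an older version\n",
			       versions[i]);
			continue;
		}
		printf("negotiated %u.%u (%#x)\n",
		       (unsigned)MAJOR_VERSION(versions[i]),
		       (unsigned)MINOR_VERSION(versions[i]), versions[i]);
		return 0;
	}
	printf("no supported version found\n");
	return 1;
}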
 
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 85348590848b..6f879685fedd 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -429,7 +429,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static struct of_device_id rcar_pci_of_match[] = {
+static const struct of_device_id rcar_pci_of_match[] = {
 	{ .compatible = "renesas,pci-r8a7790", },
 	{ .compatible = "renesas,pci-r8a7791", },
 	{ .compatible = "renesas,pci-r8a7794", },
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 2618f875a600..b3722b7709df 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -233,8 +233,8 @@ struct tegra_msi {
 	struct msi_controller chip;
 	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
 	struct irq_domain *domain;
-	unsigned long pages;
 	struct mutex lock;
+	u64 phys;
 	int irq;
 };
 
@@ -1448,9 +1448,8 @@ static int tegra_msi_setup_irq(struct msi_controller *chip,
 
 	irq_set_msi_desc(irq, desc);
 
-	msg.address_lo = virt_to_phys((void *)msi->pages);
-	/* 32 bit address only */
-	msg.address_hi = 0;
+	msg.address_lo = lower_32_bits(msi->phys);
+	msg.address_hi = upper_32_bits(msi->phys);
 	msg.data = hwirq;
 
 	pci_write_msi_msg(irq, &msg);
@@ -1499,7 +1498,6 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
 	const struct tegra_pcie_soc *soc = pcie->soc;
 	struct tegra_msi *msi = &pcie->msi;
 	struct device *dev = pcie->dev;
-	unsigned long base;
 	int err;
 	u32 reg;
 
@@ -1531,12 +1529,25 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
 		goto err;
 	}
 
-	/* setup AFI/FPCI range */
-	msi->pages = __get_free_pages(GFP_KERNEL, 0);
-	base = virt_to_phys((void *)msi->pages);
+	/*
+	 * The PCI host bridge on Tegra contains some logic that intercepts
+	 * MSI writes, which means that the MSI target address doesn't have
+	 * to point to actual physical memory. Rather than allocating one 4
+	 * KiB page of system memory that's never used, we can simply pick
+	 * an arbitrary address within an area reserved for system memory
+	 * in the FPCI address map.
+	 *
+	 * However, in order to avoid confusion, we pick an address that
+	 * doesn't map to physical memory. The FPCI address map reserves a
+	 * 1012 GiB region for system memory and memory-mapped I/O. Since
+	 * none of the Tegra SoCs that contain this PCI host bridge can
+	 * address more than 16 GiB of system memory, the last 4 KiB of
+	 * these 1012 GiB is a good candidate.
+	 */
+	msi->phys = 0xfcfffff000;
 
-	afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
-	afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
+	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
+	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
 	/* this register is in 4K increments */
 	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
 
@@ -1585,8 +1596,6 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
 	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
 	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
 
-	free_pages(msi->pages, 0);
-
 	if (msi->irq > 0)
 		free_irq(msi->irq, pcie);
 
@@ -2238,7 +2247,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
 	struct pci_bus *child;
 	int err;
 
-	host = pci_alloc_host_bridge(sizeof(*pcie));
+	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
 	if (!host)
 		return -ENOMEM;
 
@@ -2284,16 +2293,15 @@ static int tegra_pcie_probe(struct platform_device *pdev)
 	host->busnr = pcie->busn.start;
 	host->dev.parent = &pdev->dev;
 	host->ops = &tegra_pcie_ops;
+	host->map_irq = tegra_pcie_map_irq;
+	host->swizzle_irq = pci_common_swizzle;
 
-	err = pci_register_host_bridge(host);
+	err = pci_scan_root_bus_bridge(host);
 	if (err < 0) {
 		dev_err(dev, "failed to register host: %d\n", err);
 		goto disable_msi;
 	}
 
-	pci_scan_child_bus(host->bus);
-
-	pci_fixup_irqs(pci_common_swizzle, tegra_pcie_map_irq);
 	pci_bus_size_bridges(host->bus);
 	pci_bus_assign_resources(host->bus);
 
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 9281eee2d000..d417acab0ecf 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -120,30 +120,35 @@ out_release_res:
 
 static int versatile_pci_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct resource *res;
 	int ret, i, myslot = -1;
 	u32 val;
 	void __iomem *local_pci_cfg_base;
 	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
 	LIST_HEAD(pci_res);
 
+	bridge = devm_pci_alloc_host_bridge(dev, 0);
+	if (!bridge)
+		return -ENOMEM;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	versatile_pci_base = devm_ioremap_resource(&pdev->dev, res);
+	versatile_pci_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(versatile_pci_base))
 		return PTR_ERR(versatile_pci_base);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	versatile_cfg_base[0] = devm_ioremap_resource(&pdev->dev, res);
+	versatile_cfg_base[0] = devm_ioremap_resource(dev, res);
 	if (IS_ERR(versatile_cfg_base[0]))
 		return PTR_ERR(versatile_cfg_base[0]);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-	versatile_cfg_base[1] = devm_pci_remap_cfg_resource(&pdev->dev,
-							    res);
+	versatile_cfg_base[1] = devm_pci_remap_cfg_resource(dev, res);
 	if (IS_ERR(versatile_cfg_base[1]))
 		return PTR_ERR(versatile_cfg_base[1]);
 
-	ret = versatile_pci_parse_request_of_pci_ranges(&pdev->dev, &pci_res);
+	ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res);
 	if (ret)
 		return ret;
 
@@ -159,7 +164,7 @@ static int versatile_pci_probe(struct platform_device *pdev)
 		}
 	}
 	if (myslot == -1) {
-		dev_err(&pdev->dev, "Cannot find PCI core!\n");
+		dev_err(dev, "Cannot find PCI core!\n");
 		return -EIO;
 	}
 	/*
@@ -167,7 +172,7 @@ static int versatile_pci_probe(struct platform_device *pdev)
 	 */
 	pci_slot_ignore |= (1 << myslot);
 
-	dev_info(&pdev->dev, "PCI core found (slot %d)\n", myslot);
+	dev_info(dev, "PCI core found (slot %d)\n", myslot);
 
 	writel(myslot, PCI_SELFID);
 	local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11);
@@ -199,11 +204,20 @@ static int versatile_pci_probe(struct platform_device *pdev)
 	pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
 	pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC);
 
-	bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, NULL, &pci_res);
-	if (!bus)
-		return -ENOMEM;
+	list_splice_init(&pci_res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = NULL;
+	bridge->busnr = 0;
+	bridge->ops = &pci_versatile_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0)
+		return ret;
+
+	bus = bridge->bus;
 
-	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
 	pci_assign_unassigned_bus_resources(bus);
 	list_for_each_entry(child, &bus->children, node)
 		pcie_bus_configure_settings(child);
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 8cae013e7188..bd897479a215 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -636,13 +636,16 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
 	struct xgene_pcie_port *port;
 	resource_size_t iobase = 0;
 	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
 	int ret;
 	LIST_HEAD(res);
 
-	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
-	if (!port)
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+	if (!bridge)
 		return -ENOMEM;
 
+	port = pci_host_bridge_priv(bridge);
+
 	port->node = of_node_get(dn);
 	port->dev = dev;
 
@@ -670,11 +673,19 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
 	if (ret)
 		goto error;
 
-	bus = pci_create_root_bus(dev, 0, &xgene_pcie_ops, port, &res);
-	if (!bus) {
-		ret = -ENOMEM;
+	list_splice_init(&res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = port;
+	bridge->busnr = 0;
+	bridge->ops = &xgene_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0)
 		goto error;
-	}
+
+	bus = bridge->bus;
 
 	pci_scan_child_bus(bus);
 	pci_assign_unassigned_bus_resources(bus);
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index 75ec5cea26f6..4ea4f8f5dc77 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -579,12 +579,14 @@ static int altera_pcie_probe(struct platform_device *pdev)
 	struct altera_pcie *pcie;
 	struct pci_bus *bus;
 	struct pci_bus *child;
+	struct pci_host_bridge *bridge;
 	int ret;
 
-	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-	if (!pcie)
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!bridge)
 		return -ENOMEM;
 
+	pcie = pci_host_bridge_priv(bridge);
 	pcie->pdev = pdev;
 
 	ret = altera_pcie_parse_dt(pcie);
@@ -613,12 +615,20 @@ static int altera_pcie_probe(struct platform_device *pdev)
 	cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
 	altera_pcie_host_init(pcie);
 
-	bus = pci_scan_root_bus(dev, pcie->root_bus_nr, &altera_pcie_ops,
-				pcie, &pcie->resources);
-	if (!bus)
-		return -ENOMEM;
+	list_splice_init(&pcie->resources, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = pcie;
+	bridge->busnr = pcie->root_bus_nr;
+	bridge->ops = &altera_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0)
+		return ret;
+
+	bus = bridge->bus;
 
-	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
 	pci_assign_unassigned_bus_resources(bus);
 
 	/* Configure PCI Express setting. */
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index 384c27e664fe..f03d5e3612e9 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -45,12 +45,15 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
 	struct device *dev = &bdev->dev;
 	struct iproc_pcie *pcie;
 	LIST_HEAD(resources);
+	struct pci_host_bridge *bridge;
 	int ret;
 
-	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-	if (!pcie)
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!bridge)
 		return -ENOMEM;
 
+	pcie = pci_host_bridge_priv(bridge);
+
 	pcie->dev = dev;
 
 	pcie->type = IPROC_PCIE_PAXB_BCMA;
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index 90d2bdd94e41..22531190bc40 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -52,12 +52,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
 	struct resource reg;
 	resource_size_t iobase = 0;
 	LIST_HEAD(resources);
+	struct pci_host_bridge *bridge;
 	int ret;
 
-	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-	if (!pcie)
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!bridge)
 		return -ENOMEM;
 
+	pcie = pci_host_bridge_priv(bridge);
+
 	pcie->dev = dev;
 	pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev);
 
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 0f39bd2a04cb..c57486348856 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -452,14 +452,13 @@ static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
  * Note access to the configuration registers are protected at the higher layer
  * by 'pci_lock' in drivers/pci/access.c
  */
-static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
+static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
+					    int busno,
 					    unsigned int devfn,
 					    int where)
 {
-	struct iproc_pcie *pcie = iproc_data(bus);
 	unsigned slot = PCI_SLOT(devfn);
 	unsigned fn = PCI_FUNC(devfn);
-	unsigned busno = bus->number;
 	u32 val;
 	u16 offset;
 
@@ -499,6 +498,58 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
 		return (pcie->base + offset);
 }
 
+static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus,
+						unsigned int devfn,
+						int where)
+{
+	return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn,
+				      where);
+}
+
+static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie,
+				       unsigned int devfn, int where,
+				       int size, u32 *val)
+{
+	void __iomem *addr;
+
+	addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
+	if (!addr) {
+		*val = ~0;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	*val = readl(addr);
+
+	if (size <= 2)
+		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie,
+					unsigned int devfn, int where,
+					int size, u32 val)
+{
+	void __iomem *addr;
+	u32 mask, tmp;
+
+	addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
+	if (!addr)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (size == 4) {
+		writel(val, addr);
+		return PCIBIOS_SUCCESSFUL;
+	}
+
+	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+	tmp = readl(addr) & mask;
+	tmp |= val << ((where & 0x3) * 8);
+	writel(tmp, addr);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
 static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
 				    int where, int size, u32 *val)
 {
@@ -524,7 +575,7 @@ static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
 }
 
 static struct pci_ops iproc_pcie_ops = {
-	.map_bus = iproc_pcie_map_cfg_bus,
+	.map_bus = iproc_pcie_bus_map_cfg_bus,
 	.read = iproc_pcie_config_read32,
 	.write = iproc_pcie_config_write32,
 };
@@ -556,12 +607,11 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie)
 	msleep(100);
 }
 
-static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
+static int iproc_pcie_check_link(struct iproc_pcie *pcie)
 {
 	struct device *dev = pcie->dev;
-	u8 hdr_type;
-	u32 link_ctrl, class, val;
-	u16 pos = PCI_EXP_CAP, link_status;
+	u32 hdr_type, link_ctrl, link_status, class, val;
+	u16 pos = PCI_EXP_CAP;
 	bool link_is_active = false;
 
 	/*
@@ -578,7 +628,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
 	}
 
 	/* make sure we are not in EP mode */
-	pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
+	iproc_pci_raw_config_read32(pcie,  0, PCI_HEADER_TYPE, 1, &hdr_type);
 	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
 		dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
 		return -EFAULT;
@@ -588,13 +638,16 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
 #define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c
 #define PCI_CLASS_BRIDGE_MASK      0xffff00
 #define PCI_CLASS_BRIDGE_SHIFT     8
-	pci_bus_read_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, &class);
+	iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
+				    4, &class);
 	class &= ~PCI_CLASS_BRIDGE_MASK;
 	class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
-	pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class);
+	iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
+				     4, class);
 
 	/* check link status to see if link is active */
-	pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
+	iproc_pci_raw_config_read32(pcie, 0, pos + PCI_EXP_LNKSTA,
+				    2, &link_status);
 	if (link_status & PCI_EXP_LNKSTA_NLW)
 		link_is_active = true;
 
@@ -603,20 +656,21 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
 #define PCI_TARGET_LINK_SPEED_MASK    0xf
 #define PCI_TARGET_LINK_SPEED_GEN2    0x2
 #define PCI_TARGET_LINK_SPEED_GEN1    0x1
-		pci_bus_read_config_dword(bus, 0,
-					  pos + PCI_EXP_LNKCTL2,
+		iproc_pci_raw_config_read32(pcie, 0,
+					  pos + PCI_EXP_LNKCTL2, 4,
 					  &link_ctrl);
 		if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
 		    PCI_TARGET_LINK_SPEED_GEN2) {
 			link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
 			link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
-			pci_bus_write_config_dword(bus, 0,
-					   pos + PCI_EXP_LNKCTL2,
-					   link_ctrl);
+			iproc_pci_raw_config_write32(pcie, 0,
+						pos + PCI_EXP_LNKCTL2,
+						4, link_ctrl);
 			msleep(100);
 
-			pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
-						 &link_status);
+			iproc_pci_raw_config_read32(pcie, 0,
+						pos + PCI_EXP_LNKSTA,
+						2, &link_status);
 			if (link_status & PCI_EXP_LNKSTA_NLW)
 				link_is_active = true;
 		}
@@ -1205,7 +1259,8 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 	struct device *dev;
 	int ret;
 	void *sysdata;
-	struct pci_bus *bus, *child;
+	struct pci_bus *child;
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
 
 	dev = pcie->dev;
 
@@ -1252,18 +1307,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 	sysdata = pcie;
 #endif
 
-	bus = pci_create_root_bus(dev, 0, &iproc_pcie_ops, sysdata, res);
-	if (!bus) {
-		dev_err(dev, "unable to create PCI root bus\n");
-		ret = -ENOMEM;
-		goto err_power_off_phy;
-	}
-	pcie->root_bus = bus;
-
-	ret = iproc_pcie_check_link(pcie, bus);
+	ret = iproc_pcie_check_link(pcie);
 	if (ret) {
 		dev_err(dev, "no PCIe EP device detected\n");
-		goto err_rm_root_bus;
+		goto err_power_off_phy;
 	}
 
 	iproc_pcie_enable(pcie);
@@ -1272,23 +1319,31 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 		if (iproc_pcie_msi_enable(pcie))
 			dev_info(dev, "not using iProc MSI\n");
 
-	pci_scan_child_bus(bus);
-	pci_assign_unassigned_bus_resources(bus);
+	list_splice_init(res, &host->windows);
+	host->busnr = 0;
+	host->dev.parent = dev;
+	host->ops = &iproc_pcie_ops;
+	host->sysdata = sysdata;
+	host->map_irq = pcie->map_irq;
+	host->swizzle_irq = pci_common_swizzle;
 
-	if (pcie->map_irq)
-		pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
+	ret = pci_scan_root_bus_bridge(host);
+	if (ret < 0) {
+		dev_err(dev, "failed to scan host: %d\n", ret);
+		goto err_power_off_phy;
+	}
 
-	list_for_each_entry(child, &bus->children, node)
+	pci_assign_unassigned_bus_resources(host->bus);
+
+	pcie->root_bus = host->bus;
+
+	list_for_each_entry(child, &host->bus->children, node)
 		pcie_bus_configure_settings(child);
 
-	pci_bus_add_devices(bus);
+	pci_bus_add_devices(host->bus);
 
 	return 0;
 
-err_rm_root_bus:
-	pci_stop_root_bus(bus);
-	pci_remove_root_bus(bus);
-
 err_power_off_phy:
 	phy_power_off(pcie->phy);
 err_exit_phy:
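
The new iproc_pci_raw_config_read32()/_write32() helpers above emulate 1- and 2-byte config accesses on top of aligned 32-bit reads and writes, so the driver can poke registers before a struct pci_bus exists. A stand-alone model of that shift/mask logic (not kernel code; an array stands in for config space):

#include <stdio.h>
#include <stdint.h>

static uint32_t cfg[64];	/* fake 256-byte config space */

static uint32_t cfg_read(int where, int size)
{
	uint32_t val = cfg[(where & ~0x3) / 4];

	/* Narrow accesses extract the right bytes from the aligned dword. */
	if (size <= 2)
		val = (val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
	return val;
}

static void cfg_write(int where, int size, uint32_t val)
{
	uint32_t *reg = &cfg[(where & ~0x3) / 4];
	uint32_t mask, tmp;

	if (size == 4) {
		*reg = val;
		return;
	}
	/* Read-modify-write the enclosing dword, as the driver does. */
	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	tmp = *reg & mask;
	tmp |= val << ((where & 0x3) * 8);
	*reg = tmp;
}

int main(void)
{
	cfg_write(0x06, 2, 0xbeef);	/* 16-bit write at offset 6 */
	cfg_write(0x04, 1, 0x12);	/* 8-bit write at offset 4 */
	printf("dword@4 = 0x%08x, word@6 = 0x%04x, byte@4 = 0x%02x\n",
	       cfg_read(0x04, 4), cfg_read(0x06, 2), cfg_read(0x04, 1));
	return 0;
}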
diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c
new file mode 100644
index 000000000000..5a9d8589ea0b
--- /dev/null
+++ b/drivers/pci/host/pcie-mediatek.c
@@ -0,0 +1,554 @@
+/*
+ * MediaTek PCIe host controller driver.
+ *
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+/* PCIe shared registers */
+#define PCIE_SYS_CFG		0x00
+#define PCIE_INT_ENABLE		0x0c
+#define PCIE_CFG_ADDR		0x20
+#define PCIE_CFG_DATA		0x24
+
+/* PCIe per port registers */
+#define PCIE_BAR0_SETUP		0x10
+#define PCIE_CLASS		0x34
+#define PCIE_LINK_STATUS	0x50
+
+#define PCIE_PORT_INT_EN(x)	BIT(20 + (x))
+#define PCIE_PORT_PERST(x)	BIT(1 + (x))
+#define PCIE_PORT_LINKUP	BIT(0)
+#define PCIE_BAR_MAP_MAX	GENMASK(31, 16)
+
+#define PCIE_BAR_ENABLE		BIT(0)
+#define PCIE_REVISION_ID	BIT(0)
+#define PCIE_CLASS_CODE		(0x60400 << 8)
+#define PCIE_CONF_REG(regn)	(((regn) & GENMASK(7, 2)) | \
+				((((regn) >> 8) & GENMASK(3, 0)) << 24))
+#define PCIE_CONF_FUN(fun)	(((fun) << 8) & GENMASK(10, 8))
+#define PCIE_CONF_DEV(dev)	(((dev) << 11) & GENMASK(15, 11))
+#define PCIE_CONF_BUS(bus)	(((bus) << 16) & GENMASK(23, 16))
+#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
+	(PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
+	 PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))
+
+/* MediaTek specific configuration registers */
+#define PCIE_FTS_NUM		0x70c
+#define PCIE_FTS_NUM_MASK	GENMASK(15, 8)
+#define PCIE_FTS_NUM_L0(x)	((x) & 0xff << 8)
+
+#define PCIE_FC_CREDIT		0x73c
+#define PCIE_FC_CREDIT_MASK	(GENMASK(31, 31) | GENMASK(28, 16))
+#define PCIE_FC_CREDIT_VAL(x)	((x) << 16)
+
+/**
+ * struct mtk_pcie_port - PCIe port information
+ * @base: IO mapped register base
+ * @list: port list
+ * @pcie: pointer to PCIe host info
+ * @reset: pointer to port reset control
+ * @sys_ck: pointer to bus clock
+ * @phy: pointer to phy control block
+ * @lane: lane count
+ * @index: port index
+ */
+struct mtk_pcie_port {
+	void __iomem *base;
+	struct list_head list;
+	struct mtk_pcie *pcie;
+	struct reset_control *reset;
+	struct clk *sys_ck;
+	struct phy *phy;
+	u32 lane;
+	u32 index;
+};
+
+/**
+ * struct mtk_pcie - PCIe host information
+ * @dev: pointer to PCIe device
+ * @base: IO mapped register base
+ * @free_ck: free-run reference clock
+ * @io: IO resource
+ * @pio: PIO resource
+ * @mem: non-prefetchable memory resource
+ * @busn: bus range
+ * @offset: IO / Memory offset
+ * @ports: pointer to PCIe port information
+ */
+struct mtk_pcie {
+	struct device *dev;
+	void __iomem *base;
+	struct clk *free_ck;
+
+	struct resource io;
+	struct resource pio;
+	struct resource mem;
+	struct resource busn;
+	struct {
+		resource_size_t mem;
+		resource_size_t io;
+	} offset;
+	struct list_head ports;
+};
+
+static inline bool mtk_pcie_link_up(struct mtk_pcie_port *port)
+{
+	return !!(readl(port->base + PCIE_LINK_STATUS) & PCIE_PORT_LINKUP);
+}
+
+static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+
+	clk_disable_unprepare(pcie->free_ck);
+
+	if (dev->pm_domain) {
+		pm_runtime_put_sync(dev);
+		pm_runtime_disable(dev);
+	}
+}
+
+static void mtk_pcie_port_free(struct mtk_pcie_port *port)
+{
+	struct mtk_pcie *pcie = port->pcie;
+	struct device *dev = pcie->dev;
+
+	devm_iounmap(dev, port->base);
+	list_del(&port->list);
+	devm_kfree(dev, port);
+}
+
+static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
+{
+	struct mtk_pcie_port *port, *tmp;
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+		phy_power_off(port->phy);
+		clk_disable_unprepare(port->sys_ck);
+		mtk_pcie_port_free(port);
+	}
+
+	mtk_pcie_subsys_powerdown(pcie);
+}
+
+static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
+				      unsigned int devfn, int where)
+{
+	struct pci_host_bridge *host = pci_find_host_bridge(bus);
+	struct mtk_pcie *pcie = pci_host_bridge_priv(host);
+
+	writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
+			      bus->number), pcie->base + PCIE_CFG_ADDR);
+
+	return pcie->base + PCIE_CFG_DATA + (where & 3);
+}
+
+static struct pci_ops mtk_pcie_ops = {
+	.map_bus = mtk_pcie_map_bus,
+	.read  = pci_generic_config_read,
+	.write = pci_generic_config_write,
+};
+
+static void mtk_pcie_configure_rc(struct mtk_pcie_port *port)
+{
+	struct mtk_pcie *pcie = port->pcie;
+	u32 func = PCI_FUNC(port->index << 3);
+	u32 slot = PCI_SLOT(port->index << 3);
+	u32 val;
+
+	/* enable interrupt */
+	val = readl(pcie->base + PCIE_INT_ENABLE);
+	val |= PCIE_PORT_INT_EN(port->index);
+	writel(val, pcie->base + PCIE_INT_ENABLE);
+
+	/* Map to the whole DDR region; this must be set before any config access. */
+	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
+	       port->base + PCIE_BAR0_SETUP);
+
+	/* configure class code and revision ID */
+	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);
+
+	/* configure FC credit */
+	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
+	       pcie->base + PCIE_CFG_ADDR);
+	val = readl(pcie->base + PCIE_CFG_DATA);
+	val &= ~PCIE_FC_CREDIT_MASK;
+	val |= PCIE_FC_CREDIT_VAL(0x806c);
+	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
+	       pcie->base + PCIE_CFG_ADDR);
+	writel(val, pcie->base + PCIE_CFG_DATA);
+
+	/* configure RC FTS number to 250 when it leaves L0s */
+	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
+	       pcie->base + PCIE_CFG_ADDR);
+	val = readl(pcie->base + PCIE_CFG_DATA);
+	val &= ~PCIE_FTS_NUM_MASK;
+	val |= PCIE_FTS_NUM_L0(0x50);
+	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
+	       pcie->base + PCIE_CFG_ADDR);
+	writel(val, pcie->base + PCIE_CFG_DATA);
+}
+
+static void mtk_pcie_assert_ports(struct mtk_pcie_port *port)
+{
+	struct mtk_pcie *pcie = port->pcie;
+	u32 val;
+
+	/* assert port PERST_N */
+	val = readl(pcie->base + PCIE_SYS_CFG);
+	val |= PCIE_PORT_PERST(port->index);
+	writel(val, pcie->base + PCIE_SYS_CFG);
+
+	/* de-assert port PERST_N */
+	val = readl(pcie->base + PCIE_SYS_CFG);
+	val &= ~PCIE_PORT_PERST(port->index);
+	writel(val, pcie->base + PCIE_SYS_CFG);
+
+	/* PCIe v2.0 needs at least a 100ms delay to train from Gen1 to Gen2 */
+	msleep(100);
+}
+
+static void mtk_pcie_enable_ports(struct mtk_pcie_port *port)
+{
+	struct device *dev = port->pcie->dev;
+	int err;
+
+	err = clk_prepare_enable(port->sys_ck);
+	if (err) {
+		dev_err(dev, "failed to enable port%d clock\n", port->index);
+		goto err_sys_clk;
+	}
+
+	reset_control_assert(port->reset);
+	reset_control_deassert(port->reset);
+
+	err = phy_power_on(port->phy);
+	if (err) {
+		dev_err(dev, "failed to power on port%d phy\n", port->index);
+		goto err_phy_on;
+	}
+
+	mtk_pcie_assert_ports(port);
+
+	/* if the link is up, set up the root port configuration space */
+	if (mtk_pcie_link_up(port)) {
+		mtk_pcie_configure_rc(port);
+		return;
+	}
+
+	dev_info(dev, "Port%d link down\n", port->index);
+
+	phy_power_off(port->phy);
+err_phy_on:
+	clk_disable_unprepare(port->sys_ck);
+err_sys_clk:
+	mtk_pcie_port_free(port);
+}
+
+static int mtk_pcie_parse_ports(struct mtk_pcie *pcie,
+				struct device_node *node,
+				int index)
+{
+	struct mtk_pcie_port *port;
+	struct resource *regs;
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	char name[10];
+	int err;
+
+	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	err = of_property_read_u32(node, "num-lanes", &port->lane);
+	if (err) {
+		dev_err(dev, "missing num-lanes property\n");
+		return err;
+	}
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
+	port->base = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(port->base)) {
+		dev_err(dev, "failed to map port%d base\n", index);
+		return PTR_ERR(port->base);
+	}
+
+	snprintf(name, sizeof(name), "sys_ck%d", index);
+	port->sys_ck = devm_clk_get(dev, name);
+	if (IS_ERR(port->sys_ck)) {
+		dev_err(dev, "failed to get port%d clock\n", index);
+		return PTR_ERR(port->sys_ck);
+	}
+
+	snprintf(name, sizeof(name), "pcie-rst%d", index);
+	port->reset = devm_reset_control_get_optional(dev, name);
+	if (PTR_ERR(port->reset) == -EPROBE_DEFER)
+		return PTR_ERR(port->reset);
+
+	/* some platforms may use the default PHY setting */
+	snprintf(name, sizeof(name), "pcie-phy%d", index);
+	port->phy = devm_phy_optional_get(dev, name);
+	if (IS_ERR(port->phy))
+		return PTR_ERR(port->phy);
+
+	port->index = index;
+	port->pcie = pcie;
+
+	INIT_LIST_HEAD(&port->list);
+	list_add_tail(&port->list, &pcie->ports);
+
+	return 0;
+}
+
+static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource *regs;
+	int err;
+
+	/* get shared registers */
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pcie->base = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(pcie->base)) {
+		dev_err(dev, "failed to map shared register\n");
+		return PTR_ERR(pcie->base);
+	}
+
+	pcie->free_ck = devm_clk_get(dev, "free_ck");
+	if (IS_ERR(pcie->free_ck)) {
+		if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		pcie->free_ck = NULL;
+	}
+
+	if (dev->pm_domain) {
+		pm_runtime_enable(dev);
+		pm_runtime_get_sync(dev);
+	}
+
+	/* enable top level clock */
+	err = clk_prepare_enable(pcie->free_ck);
+	if (err) {
+		dev_err(dev, "failed to enable free_ck\n");
+		goto err_free_ck;
+	}
+
+	return 0;
+
+err_free_ck:
+	if (dev->pm_domain) {
+		pm_runtime_put_sync(dev);
+		pm_runtime_disable(dev);
+	}
+
+	return err;
+}
+
+static int mtk_pcie_setup(struct mtk_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct device_node *node = dev->of_node, *child;
+	struct of_pci_range_parser parser;
+	struct of_pci_range range;
+	struct resource res;
+	struct mtk_pcie_port *port, *tmp;
+	int err;
+
+	if (of_pci_range_parser_init(&parser, node)) {
+		dev_err(dev, "missing \"ranges\" property\n");
+		return -EINVAL;
+	}
+
+	for_each_of_pci_range(&parser, &range) {
+		err = of_pci_range_to_resource(&range, node, &res);
+		if (err < 0)
+			return err;
+
+		switch (res.flags & IORESOURCE_TYPE_BITS) {
+		case IORESOURCE_IO:
+			pcie->offset.io = res.start - range.pci_addr;
+
+			memcpy(&pcie->pio, &res, sizeof(res));
+			pcie->pio.name = node->full_name;
+
+			pcie->io.start = range.cpu_addr;
+			pcie->io.end = range.cpu_addr + range.size - 1;
+			pcie->io.flags = IORESOURCE_MEM;
+			pcie->io.name = "I/O";
+
+			memcpy(&res, &pcie->io, sizeof(res));
+			break;
+
+		case IORESOURCE_MEM:
+			pcie->offset.mem = res.start - range.pci_addr;
+
+			memcpy(&pcie->mem, &res, sizeof(res));
+			pcie->mem.name = "non-prefetchable";
+			break;
+		}
+	}
+
+	err = of_pci_parse_bus_range(node, &pcie->busn);
+	if (err < 0) {
+		dev_err(dev, "failed to parse bus ranges property: %d\n", err);
+		pcie->busn.name = node->name;
+		pcie->busn.start = 0;
+		pcie->busn.end = 0xff;
+		pcie->busn.flags = IORESOURCE_BUS;
+	}
+
+	for_each_available_child_of_node(node, child) {
+		int index;
+
+		err = of_pci_get_devfn(child);
+		if (err < 0) {
+			dev_err(dev, "failed to parse devfn: %d\n", err);
+			return err;
+		}
+
+		index = PCI_SLOT(err);
+
+		err = mtk_pcie_parse_ports(pcie, child, index);
+		if (err)
+			return err;
+	}
+
+	err = mtk_pcie_subsys_powerup(pcie);
+	if (err)
+		return err;
+
+	/* enable each port, and then check link status */
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+		mtk_pcie_enable_ports(port);
+
+	/* power down PCIe subsys if slots are all empty (link down) */
+	if (list_empty(&pcie->ports))
+		mtk_pcie_subsys_powerdown(pcie);
+
+	return 0;
+}
+
+static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
+{
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct list_head *windows = &host->windows;
+	struct device *dev = pcie->dev;
+	int err;
+
+	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
+	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
+	pci_add_resource(windows, &pcie->busn);
+
+	err = devm_request_pci_bus_resources(dev, windows);
+	if (err < 0)
+		return err;
+
+	pci_remap_iospace(&pcie->pio, pcie->io.start);
+
+	return 0;
+}
+
+static int mtk_pcie_register_host(struct pci_host_bridge *host)
+{
+	struct mtk_pcie *pcie = pci_host_bridge_priv(host);
+	struct pci_bus *child;
+	int err;
+
+	host->busnr = pcie->busn.start;
+	host->dev.parent = pcie->dev;
+	host->ops = &mtk_pcie_ops;
+	host->map_irq = of_irq_parse_and_map_pci;
+	host->swizzle_irq = pci_common_swizzle;
+
+	err = pci_scan_root_bus_bridge(host);
+	if (err < 0)
+		return err;
+
+	pci_bus_size_bridges(host->bus);
+	pci_bus_assign_resources(host->bus);
+
+	list_for_each_entry(child, &host->bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(host->bus);
+
+	return 0;
+}
+
+static int mtk_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_pcie *pcie;
+	struct pci_host_bridge *host;
+	int err;
+
+	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!host)
+		return -ENOMEM;
+
+	pcie = pci_host_bridge_priv(host);
+
+	pcie->dev = dev;
+	platform_set_drvdata(pdev, pcie);
+	INIT_LIST_HEAD(&pcie->ports);
+
+	err = mtk_pcie_setup(pcie);
+	if (err)
+		return err;
+
+	err = mtk_pcie_request_resources(pcie);
+	if (err)
+		goto put_resources;
+
+	err = mtk_pcie_register_host(host);
+	if (err)
+		goto put_resources;
+
+	return 0;
+
+put_resources:
+	if (!list_empty(&pcie->ports))
+		mtk_pcie_put_resources(pcie);
+
+	return err;
+}
+
+static const struct of_device_id mtk_pcie_ids[] = {
+	{ .compatible = "mediatek,mt7623-pcie"},
+	{ .compatible = "mediatek,mt2701-pcie"},
+	{},
+};
+
+static struct platform_driver mtk_pcie_driver = {
+	.probe = mtk_pcie_probe,
+	.driver = {
+		.name = "mtk-pcie",
+		.of_match_table = mtk_pcie_ids,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(mtk_pcie_driver);
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index cb07c45c1858..246d485b24c6 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -450,29 +450,33 @@ done:
 static int rcar_pcie_enable(struct rcar_pcie *pcie)
 {
 	struct device *dev = pcie->dev;
+	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
 	struct pci_bus *bus, *child;
-	LIST_HEAD(res);
+	int ret;
 
 	/* Try setting 5 GT/s link speed */
 	rcar_pcie_force_speedup(pcie);
 
-	rcar_pcie_setup(&res, pcie);
+	rcar_pcie_setup(&bridge->windows, pcie);
 
 	pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
 
+	bridge->dev.parent = dev;
+	bridge->sysdata = pcie;
+	bridge->busnr = pcie->root_bus_nr;
+	bridge->ops = &rcar_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
 	if (IS_ENABLED(CONFIG_PCI_MSI))
-		bus = pci_scan_root_bus_msi(dev, pcie->root_bus_nr,
-				&rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
-	else
-		bus = pci_scan_root_bus(dev, pcie->root_bus_nr,
-				&rcar_pcie_ops, pcie, &res);
+		bridge->msi = &pcie->msi.chip;
 
-	if (!bus) {
-		dev_err(dev, "Scanning rootbus failed");
-		return -ENODEV;
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0) {
+		kfree(bridge);
+		return ret;
 	}
 
-	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+	bus = bridge->bus;
 
 	pci_bus_size_bridges(bus);
 	pci_bus_assign_resources(bus);
@@ -1127,11 +1131,14 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 	unsigned int data;
 	int err;
 	int (*hw_init_fn)(struct rcar_pcie *);
+	struct pci_host_bridge *bridge;
 
-	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-	if (!pcie)
+	bridge = pci_alloc_host_bridge(sizeof(*pcie));
+	if (!bridge)
 		return -ENOMEM;
 
+	pcie = pci_host_bridge_priv(bridge);
+
 	pcie->dev = dev;
 
 	INIT_LIST_HEAD(&pcie->resources);
@@ -1141,12 +1148,12 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 	err = rcar_pcie_get_resources(pcie);
 	if (err < 0) {
 		dev_err(dev, "failed to request resources: %d\n", err);
-		return err;
+		goto err_free_bridge;
 	}
 
 	err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
 	if (err)
-		return err;
+		goto err_free_bridge;
 
 	pm_runtime_enable(dev);
 	err = pm_runtime_get_sync(dev);
@@ -1183,6 +1190,9 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 
 	return 0;
 
+err_free_bridge:
+	pci_free_host_bridge(bridge);
+
 err_pm_put:
 	pm_runtime_put(dev);
 
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index 0e020b6e0943..5acf8694fb23 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -139,6 +139,7 @@
 		 PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
 		 PCIE_CORE_INT_MMVC)
 
+#define PCIE_RC_CONFIG_NORMAL_BASE	0x800000
 #define PCIE_RC_CONFIG_BASE		0xa00000
 #define PCIE_RC_CONFIG_RID_CCR		(PCIE_RC_CONFIG_BASE + 0x08)
 #define   PCIE_RC_CONFIG_SCC_SHIFT		16
@@ -146,6 +147,9 @@
 #define   PCIE_RC_CONFIG_DCR_CSPL_SHIFT		18
 #define   PCIE_RC_CONFIG_DCR_CSPL_LIMIT		0xff
 #define   PCIE_RC_CONFIG_DCR_CPLS_SHIFT		26
+#define PCIE_RC_CONFIG_DCSR		(PCIE_RC_CONFIG_BASE + 0xc8)
+#define   PCIE_RC_CONFIG_DCSR_MPS_MASK		GENMASK(7, 5)
+#define   PCIE_RC_CONFIG_DCSR_MPS_256		(0x1 << 5)
 #define PCIE_RC_CONFIG_LINK_CAP		(PCIE_RC_CONFIG_BASE + 0xcc)
 #define   PCIE_RC_CONFIG_LINK_CAP_L0S		BIT(10)
 #define PCIE_RC_CONFIG_LCS		(PCIE_RC_CONFIG_BASE + 0xd0)
@@ -175,6 +179,8 @@
 #define IB_ROOT_PORT_REG_SIZE_SHIFT		3
 #define AXI_WRAPPER_IO_WRITE			0x6
 #define AXI_WRAPPER_MEM_WRITE			0x2
+#define AXI_WRAPPER_TYPE0_CFG			0xa
+#define AXI_WRAPPER_TYPE1_CFG			0xb
 #define AXI_WRAPPER_NOR_MSG			0xc
 
 #define MAX_AXI_IB_ROOTPORT_REGION_NUM		3
@@ -198,6 +204,7 @@
 #define RC_REGION_0_ADDR_TRANS_H		0x00000000
 #define RC_REGION_0_ADDR_TRANS_L		0x00000000
 #define RC_REGION_0_PASS_BITS			(25 - 1)
+#define RC_REGION_0_TYPE_MASK			GENMASK(3, 0)
 #define MAX_AXI_WRAPPER_REGION_NUM		33
 
 struct rockchip_pcie {
@@ -295,7 +302,9 @@ static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
 static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
 				     int where, int size, u32 *val)
 {
-	void __iomem *addr = rockchip->apb_base + PCIE_RC_CONFIG_BASE + where;
+	void __iomem *addr;
+
+	addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
 
 	if (!IS_ALIGNED((uintptr_t)addr, size)) {
 		*val = 0;
@@ -319,11 +328,13 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
 				     int where, int size, u32 val)
 {
 	u32 mask, tmp, offset;
+	void __iomem *addr;
 
 	offset = where & ~0x3;
+	addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
 
 	if (size == 4) {
-		writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
+		writel(val, addr);
 		return PCIBIOS_SUCCESSFUL;
 	}
 
@@ -334,13 +345,33 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
 	 * corrupt RW1C bits in adjacent registers.  But the hardware
 	 * doesn't support smaller writes.
 	 */
-	tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask;
+	tmp = readl(addr) & mask;
 	tmp |= val << ((where & 0x3) * 8);
-	writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
+	writel(tmp, addr);
 
 	return PCIBIOS_SUCCESSFUL;
 }
 
+static void rockchip_pcie_cfg_configuration_accesses(
+		struct rockchip_pcie *rockchip, u32 type)
+{
+	u32 ob_desc_0;
+
+	/* Configuration Accesses for region 0 */
+	rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
+
+	rockchip_pcie_write(rockchip,
+			    (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
+			    PCIE_CORE_OB_REGION_ADDR0);
+	rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
+			    PCIE_CORE_OB_REGION_ADDR1);
+	ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
+	ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
+	ob_desc_0 |= (type | (0x1 << 23));
+	rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
+	rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
+}
+
 static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
 				       struct pci_bus *bus, u32 devfn,
 				       int where, int size, u32 *val)
@@ -355,6 +386,13 @@ static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
 		return PCIBIOS_BAD_REGISTER_NUMBER;
 	}
 
+	if (bus->parent->number == rockchip->root_bus_nr)
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE0_CFG);
+	else
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE1_CFG);
+
 	if (size == 4) {
 		*val = readl(rockchip->reg_base + busdev);
 	} else if (size == 2) {
@@ -379,6 +417,13 @@ static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
 	if (!IS_ALIGNED(busdev, size))
 		return PCIBIOS_BAD_REGISTER_NUMBER;
 
+	if (bus->parent->number == rockchip->root_bus_nr)
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE0_CFG);
+	else
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE1_CFG);
+
 	if (size == 4)
 		writel(val, rockchip->reg_base + busdev);
 	else if (size == 2)
@@ -664,15 +709,10 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
 		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
 	}
 
-	rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
-
-	rockchip_pcie_write(rockchip,
-			    (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
-			    PCIE_CORE_OB_REGION_ADDR0);
-	rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
-			    PCIE_CORE_OB_REGION_ADDR1);
-	rockchip_pcie_write(rockchip, 0x0080000a, PCIE_CORE_OB_REGION_DESC0);
-	rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
+	status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
+	status |= PCIE_RC_CONFIG_DCSR_MPS_256;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
 
 	return 0;
 }
@@ -1156,13 +1196,16 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
 	return 0;
 }
 
-static int rockchip_cfg_atu(struct rockchip_pcie *rockchip)
+static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
 {
 	struct device *dev = rockchip->dev;
 	int offset;
 	int err;
 	int reg_no;
 
+	rockchip_pcie_cfg_configuration_accesses(rockchip,
+						 AXI_WRAPPER_TYPE0_CFG);
+
 	for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
 		err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
 						AXI_WRAPPER_MEM_WRITE,
@@ -1251,6 +1294,9 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
 	clk_disable_unprepare(rockchip->aclk_perf_pcie);
 	clk_disable_unprepare(rockchip->aclk_pcie);
 
+	if (!IS_ERR(rockchip->vpcie0v9))
+		regulator_disable(rockchip->vpcie0v9);
+
 	return ret;
 }
 
@@ -1259,24 +1305,54 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
 	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
 	int err;
 
-	clk_prepare_enable(rockchip->clk_pcie_pm);
-	clk_prepare_enable(rockchip->hclk_pcie);
-	clk_prepare_enable(rockchip->aclk_perf_pcie);
-	clk_prepare_enable(rockchip->aclk_pcie);
+	if (!IS_ERR(rockchip->vpcie0v9)) {
+		err = regulator_enable(rockchip->vpcie0v9);
+		if (err) {
+			dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+			return err;
+		}
+	}
+
+	err = clk_prepare_enable(rockchip->clk_pcie_pm);
+	if (err)
+		goto err_pcie_pm;
+
+	err = clk_prepare_enable(rockchip->hclk_pcie);
+	if (err)
+		goto err_hclk_pcie;
+
+	err = clk_prepare_enable(rockchip->aclk_perf_pcie);
+	if (err)
+		goto err_aclk_perf_pcie;
+
+	err = clk_prepare_enable(rockchip->aclk_pcie);
+	if (err)
+		goto err_aclk_pcie;
 
 	err = rockchip_pcie_init_port(rockchip);
 	if (err)
-		return err;
+		goto err_pcie_resume;
 
-	err = rockchip_cfg_atu(rockchip);
+	err = rockchip_pcie_cfg_atu(rockchip);
 	if (err)
-		return err;
+		goto err_pcie_resume;
 
 	/* Need this to enter L1 again */
 	rockchip_pcie_update_txcredit_mui(rockchip);
 	rockchip_pcie_enable_interrupts(rockchip);
 
 	return 0;
+
+err_pcie_resume:
+	clk_disable_unprepare(rockchip->aclk_pcie);
+err_aclk_pcie:
+	clk_disable_unprepare(rockchip->aclk_perf_pcie);
+err_aclk_perf_pcie:
+	clk_disable_unprepare(rockchip->hclk_pcie);
+err_hclk_pcie:
+	clk_disable_unprepare(rockchip->clk_pcie_pm);
+err_pcie_pm:
+	return err;
 }
 
 static int rockchip_pcie_probe(struct platform_device *pdev)
@@ -1284,6 +1360,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 	struct rockchip_pcie *rockchip;
 	struct device *dev = &pdev->dev;
 	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
 	struct resource_entry *win;
 	resource_size_t io_base;
 	struct resource	*mem;
@@ -1295,10 +1372,12 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 	if (!dev->of_node)
 		return -ENODEV;
 
-	rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
-	if (!rockchip)
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
+	if (!bridge)
 		return -ENOMEM;
 
+	rockchip = pci_host_bridge_priv(bridge);
+
 	platform_set_drvdata(pdev, rockchip);
 	rockchip->dev = dev;
 
@@ -1385,22 +1464,30 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 		}
 	}
 
-	err = rockchip_cfg_atu(rockchip);
+	err = rockchip_pcie_cfg_atu(rockchip);
 	if (err)
 		goto err_free_res;
 
-	rockchip->msg_region = devm_ioremap(rockchip->dev,
-					    rockchip->msg_bus_addr, SZ_1M);
+	rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
 	if (!rockchip->msg_region) {
 		err = -ENOMEM;
 		goto err_free_res;
 	}
 
-	bus = pci_scan_root_bus(&pdev->dev, 0, &rockchip_pcie_ops, rockchip, &res);
-	if (!bus) {
-		err = -ENOMEM;
+	list_splice_init(&res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = rockchip;
+	bridge->busnr = 0;
+	bridge->ops = &rockchip_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	err = pci_scan_root_bus_bridge(bridge);
+	if (err < 0)
 		goto err_free_res;
-	}
+
+	bus = bridge->bus;
+
 	rockchip->root_bus = bus;
 
 	pci_bus_size_bridges(bus);
diff --git a/drivers/pci/host/pcie-tango.c b/drivers/pci/host/pcie-tango.c
new file mode 100644
index 000000000000..6bbb81f06a53
--- /dev/null
+++ b/drivers/pci/host/pcie-tango.c
@@ -0,0 +1,141 @@
+#include <linux/pci-ecam.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+
+#define SMP8759_MUX		0x48
+#define SMP8759_TEST_OUT	0x74
+
+struct tango_pcie {
+	void __iomem *base;
+};
+
+static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn,
+			       int where, int size, u32 *val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
+	int ret;
+
+	/* Reads in configuration space outside devfn 0 return garbage */
+	if (devfn != 0)
+		return PCIBIOS_FUNC_NOT_SUPPORTED;
+
+	/*
+	 * PCI config and MMIO accesses are muxed.  Linux doesn't have a
+	 * mutual exclusion mechanism for config vs. MMIO accesses, so
+	 * concurrent accesses may cause corruption.
+	 */
+	writel_relaxed(1, pcie->base + SMP8759_MUX);
+	ret = pci_generic_config_read(bus, devfn, where, size, val);
+	writel_relaxed(0, pcie->base + SMP8759_MUX);
+
+	return ret;
+}
+
+static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn,
+				int where, int size, u32 val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
+	int ret;
+
+	writel_relaxed(1, pcie->base + SMP8759_MUX);
+	ret = pci_generic_config_write(bus, devfn, where, size, val);
+	writel_relaxed(0, pcie->base + SMP8759_MUX);
+
+	return ret;
+}
+
+static struct pci_ecam_ops smp8759_ecam_ops = {
+	.bus_shift	= 20,
+	.pci_ops	= {
+		.map_bus	= pci_ecam_map_bus,
+		.read		= smp8759_config_read,
+		.write		= smp8759_config_write,
+	}
+};
+
+static int tango_pcie_link_up(struct tango_pcie *pcie)
+{
+	void __iomem *test_out = pcie->base + SMP8759_TEST_OUT;
+	int i;
+
+	writel_relaxed(16, test_out);
+	for (i = 0; i < 10; ++i) {
+		u32 ltssm_state = readl_relaxed(test_out) >> 8;
+		if ((ltssm_state & 0x1f) == 0xf) /* L0 */
+			return 1;
+		usleep_range(3000, 4000);
+	}
+
+	return 0;
+}
+
+static int tango_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct tango_pcie *pcie;
+	struct resource *res;
+	int ret;
+
+	dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n");
+	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	pcie->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->base))
+		return PTR_ERR(pcie->base);
+
+	platform_set_drvdata(pdev, pcie);
+
+	if (!tango_pcie_link_up(pcie))
+		return -ENODEV;
+
+	return pci_host_common_probe(pdev, &smp8759_ecam_ops);
+}
+
+static const struct of_device_id tango_pcie_ids[] = {
+	{ .compatible = "sigma,smp8759-pcie" },
+	{ },
+};
+
+static struct platform_driver tango_pcie_driver = {
+	.probe	= tango_pcie_probe,
+	.driver	= {
+		.name = KBUILD_MODNAME,
+		.of_match_table = tango_pcie_ids,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(tango_pcie_driver);
+
+/*
+ * The root complex advertises the wrong device class.
+ * Header Type 1 is for PCI-to-PCI bridges.
+ */
+static void tango_fixup_class(struct pci_dev *dev)
+{
+	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_class);
+
+/*
+ * The root complex exposes a "fake" BAR, which is used to filter
+ * bus-to-system accesses.  Only accesses within the range defined by this
+ * BAR are forwarded to the host, others are ignored.
+ *
+ * By default, the DMA framework expects an identity mapping, and DRAM0 is
+ * mapped at 0x80000000.
+ */
+static void tango_fixup_bar(struct pci_dev *dev)
+{
+	dev->non_compliant_bars = true;
+	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x80000000);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_bar);
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 4b16b26ae909..eec641a34fc5 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -172,6 +172,7 @@ struct nwl_pcie {
 	u8 root_busno;
 	struct nwl_msi msi;
 	struct irq_domain *legacy_irq_domain;
+	raw_spinlock_t leg_mask_lock;
 };
 
 static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
@@ -383,11 +384,52 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }
 
+static void nwl_mask_leg_irq(struct irq_data *data)
+{
+	struct irq_desc *desc = irq_to_desc(data->irq);
+	struct nwl_pcie *pcie;
+	unsigned long flags;
+	u32 mask;
+	u32 val;
+
+	pcie = irq_desc_get_chip_data(desc);
+	mask = 1 << (data->hwirq - 1);
+	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+	nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
+	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
+}
+
+static void nwl_unmask_leg_irq(struct irq_data *data)
+{
+	struct irq_desc *desc = irq_to_desc(data->irq);
+	struct nwl_pcie *pcie;
+	unsigned long flags;
+	u32 mask;
+	u32 val;
+
+	pcie = irq_desc_get_chip_data(desc);
+	mask = 1 << (data->hwirq - 1);
+	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+	nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
+	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
+}
+
+static struct irq_chip nwl_leg_irq_chip = {
+	.name = "nwl_pcie:legacy",
+	.irq_enable = nwl_unmask_leg_irq,
+	.irq_disable = nwl_mask_leg_irq,
+	.irq_mask = nwl_mask_leg_irq,
+	.irq_unmask = nwl_unmask_leg_irq,
+};
+
 static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
 			  irq_hw_number_t hwirq)
 {
-	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
 	irq_set_chip_data(irq, domain->host_data);
+	irq_set_status_flags(irq, IRQ_LEVEL);
 
 	return 0;
 }
@@ -526,11 +568,12 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
 		return -ENOMEM;
 	}
 
+	raw_spin_lock_init(&pcie->leg_mask_lock);
 	nwl_pcie_init_msi_irq_domain(pcie);
 	return 0;
 }
 
-static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
+static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
 {
 	struct device *dev = pcie->dev;
 	struct platform_device *pdev = to_platform_device(dev);
@@ -791,13 +834,16 @@ static int nwl_pcie_probe(struct platform_device *pdev)
 	struct nwl_pcie *pcie;
 	struct pci_bus *bus;
 	struct pci_bus *child;
+	struct pci_host_bridge *bridge;
 	int err;
 	resource_size_t iobase = 0;
 	LIST_HEAD(res);
 
-	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-	if (!pcie)
-		return -ENOMEM;
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!bridge)
+		return -ENODEV;
+
+	pcie = pci_host_bridge_priv(bridge);
 
 	pcie->dev = dev;
 	pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
@@ -830,21 +876,28 @@ static int nwl_pcie_probe(struct platform_device *pdev)
 		goto error;
 	}
 
-	bus = pci_create_root_bus(dev, pcie->root_busno,
-				  &nwl_pcie_ops, pcie, &res);
-	if (!bus) {
-		err = -ENOMEM;
-		goto error;
-	}
+	list_splice_init(&res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = pcie;
+	bridge->busnr = pcie->root_busno;
+	bridge->ops = &nwl_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		err = nwl_pcie_enable_msi(pcie, bus);
+		err = nwl_pcie_enable_msi(pcie);
 		if (err < 0) {
 			dev_err(dev, "failed to enable MSI support: %d\n", err);
 			goto error;
 		}
 	}
-	pci_scan_child_bus(bus);
+
+	err = pci_scan_root_bus_bridge(bridge);
+	if (err)
+		goto error;
+
+	bus = bridge->bus;
+
 	pci_assign_unassigned_bus_resources(bus);
 	list_for_each_entry(child, &bus->children, node)
 		pcie_bus_configure_settings(child);
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 2fe2df51f9f8..f63fa5e0278c 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -633,6 +633,7 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct xilinx_pcie_port *port;
 	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
 	int err;
 	resource_size_t iobase = 0;
 	LIST_HEAD(res);
@@ -640,9 +641,11 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
 	if (!dev->of_node)
 		return -ENODEV;
 
-	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
-	if (!port)
-		return -ENOMEM;
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+	if (!bridge)
+		return -ENODEV;
+
+	port = pci_host_bridge_priv(bridge);
 
 	port->dev = dev;
 
@@ -671,21 +674,26 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
 	if (err)
 		goto error;
 
-	bus = pci_create_root_bus(dev, 0, &xilinx_pcie_ops, port, &res);
-	if (!bus) {
-		err = -ENOMEM;
-		goto error;
-	}
+
+	list_splice_init(&res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = port;
+	bridge->busnr = 0;
+	bridge->ops = &xilinx_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
 
 #ifdef CONFIG_PCI_MSI
 	xilinx_pcie_msi_chip.dev = dev;
-	bus->msi = &xilinx_pcie_msi_chip;
+	bridge->msi = &xilinx_pcie_msi_chip;
 #endif
-	pci_scan_child_bus(bus);
+	err = pci_scan_root_bus_bridge(bridge);
+	if (err < 0)
+		goto error;
+
+	bus = bridge->bus;
+
 	pci_assign_unassigned_bus_resources(bus);
-#ifndef CONFIG_MICROBLAZE
-	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
-#endif
 	list_for_each_entry(child, &bus->children, node)
 		pcie_bus_configure_settings(child);
 	pci_bus_add_devices(bus);
@@ -696,7 +704,7 @@ error:
 	return err;
 }
 
-static struct of_device_id xilinx_pcie_of_match[] = {
+static const struct of_device_id xilinx_pcie_of_match[] = {
 	{ .compatible = "xlnx,axi-pcie-host-1.00.a", },
 	{}
 };
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index 31203d69616d..6088c3083194 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -539,7 +539,10 @@ static void vmd_detach_resources(struct vmd_dev *vmd)
 }
 
 /*
- * VMD domains start at 0x1000 to not clash with ACPI _SEG domains.
+ * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
+ * Per ACPI r6.0, sec 6.5.6,  _SEG returns an integer, of which the lower
+ * 16 bits are the PCI Segment Group (domain) number.  Other bits are
+ * currently reserved.
  */
 static int vmd_find_free_domain(void)
 {
@@ -710,7 +713,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
 		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
 		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
-				       vmd_irq, 0, "vmd", &vmd->irqs[i]);
+				       vmd_irq, IRQF_NO_THREAD,
+				       "vmd", &vmd->irqs[i]);
 		if (err)
 			return err;
 	}
@@ -739,10 +743,10 @@ static void vmd_remove(struct pci_dev *dev)
 	struct vmd_dev *vmd = pci_get_drvdata(dev);
 
 	vmd_detach_resources(vmd);
-	vmd_cleanup_srcu(vmd);
 	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
 	pci_stop_root_bus(vmd->bus);
 	pci_remove_root_bus(vmd->bus);
+	vmd_cleanup_srcu(vmd);
 	vmd_teardown_dma_ops(vmd);
 	irq_domain_remove(vmd->irq_domain);
 }
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index d9dc7363ac77..120485d6f352 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -461,8 +461,6 @@ found:
 	else
 		iov->dev = dev;
 
-	mutex_init(&iov->lock);
-
 	dev->sriov = iov;
 	dev->is_physfn = 1;
 	rc = compute_max_vf_buses(dev);
@@ -491,8 +489,6 @@ static void sriov_release(struct pci_dev *dev)
 	if (dev != dev->sriov->dev)
 		pci_dev_put(dev->sriov->dev);
 
-	mutex_destroy(&dev->sriov->lock);
-
 	kfree(dev->sriov);
 	dev->sriov = NULL;
 }
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index fbad5dca3219..253d92409bb3 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1058,7 +1058,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
 
 	for (;;) {
 		if (affd) {
-			nvec = irq_calc_affinity_vectors(nvec, affd);
+			nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
 			if (nvec < minvec)
 				return -ENOSPC;
 		}
@@ -1097,7 +1097,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
 
 	for (;;) {
 		if (affd) {
-			nvec = irq_calc_affinity_vectors(nvec, affd);
+			nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
 			if (nvec < minvec)
 				return -ENOSPC;
 		}
@@ -1165,16 +1165,6 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
 	if (flags & PCI_IRQ_AFFINITY) {
 		if (!affd)
 			affd = &msi_default_affd;
-
-		if (affd->pre_vectors + affd->post_vectors > min_vecs)
-			return -EINVAL;
-
-		/*
-		 * If there aren't any vectors left after applying the pre/post
-		 * vectors don't bother with assigning affinity.
-		 */
-		if (affd->pre_vectors + affd->post_vectors == min_vecs)
-			affd = NULL;
 	} else {
 		if (WARN_ON(affd))
 			affd = NULL;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index df4aead394f2..607f677f48d2 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -415,6 +415,8 @@ static int pci_device_probe(struct device *dev)
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct pci_driver *drv = to_pci_driver(dev->driver);
 
+	pci_assign_irq(pci_dev);
+
 	error = pcibios_alloc_irq(pci_dev);
 	if (error < 0)
 		return error;
@@ -967,6 +969,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
 		return pci_legacy_resume_early(dev);
 
 	pci_update_current_state(pci_dev, PCI_D0);
+	pci_restore_state(pci_dev);
 
 	if (drv && drv->pm && drv->pm->thaw_noirq)
 		error = drv->pm->thaw_noirq(dev);
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 2d8db3ead6e8..a7a41d9c29df 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -43,9 +43,11 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
 {
 	const struct dmi_device *dmi;
 	struct dmi_dev_onboard *donboard;
+	int domain_nr;
 	int bus;
 	int devfn;
 
+	domain_nr = pci_domain_nr(pdev->bus);
 	bus = pdev->bus->number;
 	devfn = pdev->devfn;
 
@@ -53,8 +55,9 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
 	while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD,
 				      NULL, dmi)) != NULL) {
 		donboard = dmi->device_data;
-		if (donboard && donboard->bus == bus &&
-					donboard->devfn == devfn) {
+		if (donboard && donboard->segment == domain_nr &&
+				donboard->bus == bus &&
+				donboard->devfn == devfn) {
 			if (buf) {
 				if (attribute == SMBIOS_ATTR_INSTANCE_SHOW)
 					return scnprintf(buf, PAGE_SIZE,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 31e99613a12e..2f3780b50723 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -154,6 +154,129 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(resource);
 
+static ssize_t max_link_speed_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	u32 linkcap;
+	int err;
+	const char *speed;
+
+	err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap);
+	if (err)
+		return -EINVAL;
+
+	switch (linkcap & PCI_EXP_LNKCAP_SLS) {
+	case PCI_EXP_LNKCAP_SLS_8_0GB:
+		speed = "8 GT/s";
+		break;
+	case PCI_EXP_LNKCAP_SLS_5_0GB:
+		speed = "5 GT/s";
+		break;
+	case PCI_EXP_LNKCAP_SLS_2_5GB:
+		speed = "2.5 GT/s";
+		break;
+	default:
+		speed = "Unknown speed";
+	}
+
+	return sprintf(buf, "%s\n", speed);
+}
+static DEVICE_ATTR_RO(max_link_speed);
+
+static ssize_t max_link_width_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	u32 linkcap;
+	int err;
+
+	err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap);
+	if (err)
+		return -EINVAL;
+
+	return sprintf(buf, "%u\n", (linkcap & PCI_EXP_LNKCAP_MLW) >> 4);
+}
+static DEVICE_ATTR_RO(max_link_width);
+
+static ssize_t current_link_speed_show(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	u16 linkstat;
+	int err;
+	const char *speed;
+
+	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
+	if (err)
+		return -EINVAL;
+
+	switch (linkstat & PCI_EXP_LNKSTA_CLS) {
+	case PCI_EXP_LNKSTA_CLS_8_0GB:
+		speed = "8 GT/s";
+		break;
+	case PCI_EXP_LNKSTA_CLS_5_0GB:
+		speed = "5 GT/s";
+		break;
+	case PCI_EXP_LNKSTA_CLS_2_5GB:
+		speed = "2.5 GT/s";
+		break;
+	default:
+		speed = "Unknown speed";
+	}
+
+	return sprintf(buf, "%s\n", speed);
+}
+static DEVICE_ATTR_RO(current_link_speed);
+
+static ssize_t current_link_width_show(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	u16 linkstat;
+	int err;
+
+	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
+	if (err)
+		return -EINVAL;
+
+	return sprintf(buf, "%u\n",
+		(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
+}
+static DEVICE_ATTR_RO(current_link_width);
+
+static ssize_t secondary_bus_number_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	u8 sec_bus;
+	int err;
+
+	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
+	if (err)
+		return -EINVAL;
+
+	return sprintf(buf, "%u\n", sec_bus);
+}
+static DEVICE_ATTR_RO(secondary_bus_number);
+
+static ssize_t subordinate_bus_number_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	u8 sub_bus;
+	int err;
+
+	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
+	if (err)
+		return -EINVAL;
+
+	return sprintf(buf, "%u\n", sub_bus);
+}
+static DEVICE_ATTR_RO(subordinate_bus_number);
+
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 			     char *buf)
 {
@@ -472,7 +595,6 @@ static ssize_t sriov_numvfs_store(struct device *dev,
 				  const char *buf, size_t count)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
-	struct pci_sriov *iov = pdev->sriov;
 	int ret;
 	u16 num_vfs;
 
@@ -483,7 +605,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
 	if (num_vfs > pci_sriov_get_totalvfs(pdev))
 		return -ERANGE;
 
-	mutex_lock(&iov->dev->sriov->lock);
+	device_lock(&pdev->dev);
 
 	if (num_vfs == pdev->sriov->num_VFs)
 		goto exit;
@@ -518,7 +640,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
 			 num_vfs, ret);
 
 exit:
-	mutex_unlock(&iov->dev->sriov->lock);
+	device_unlock(&pdev->dev);
 
 	if (ret < 0)
 		return ret;
@@ -629,12 +751,17 @@ static struct attribute *pci_dev_attrs[] = {
 	NULL,
 };
 
-static const struct attribute_group pci_dev_group = {
-	.attrs = pci_dev_attrs,
+static struct attribute *pci_bridge_attrs[] = {
+	&dev_attr_subordinate_bus_number.attr,
+	&dev_attr_secondary_bus_number.attr,
+	NULL,
 };
 
-const struct attribute_group *pci_dev_groups[] = {
-	&pci_dev_group,
+static struct attribute *pcie_dev_attrs[] = {
+	&dev_attr_current_link_speed.attr,
+	&dev_attr_current_link_width.attr,
+	&dev_attr_max_link_width.attr,
+	&dev_attr_max_link_speed.attr,
 	NULL,
 };
 
@@ -1557,6 +1684,57 @@ static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
 	return a->mode;
 }
 
+static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
+					    struct attribute *a, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	if (pci_is_bridge(pdev))
+		return a->mode;
+
+	return 0;
+}
+
+static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
+					  struct attribute *a, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	if (pci_is_pcie(pdev))
+		return a->mode;
+
+	return 0;
+}
+
+static const struct attribute_group pci_dev_group = {
+	.attrs = pci_dev_attrs,
+};
+
+const struct attribute_group *pci_dev_groups[] = {
+	&pci_dev_group,
+	NULL,
+};
+
+static const struct attribute_group pci_bridge_group = {
+	.attrs = pci_bridge_attrs,
+};
+
+const struct attribute_group *pci_bridge_groups[] = {
+	&pci_bridge_group,
+	NULL,
+};
+
+static const struct attribute_group pcie_dev_group = {
+	.attrs = pcie_dev_attrs,
+};
+
+const struct attribute_group *pcie_dev_groups[] = {
+	&pcie_dev_group,
+	NULL,
+};
+
 static struct attribute_group pci_dev_hp_attr_group = {
 	.attrs = pci_dev_hp_attrs,
 	.is_visible = pci_dev_hp_attrs_are_visible,
@@ -1592,12 +1770,24 @@ static struct attribute_group pci_dev_attr_group = {
 	.is_visible = pci_dev_attrs_are_visible,
 };
 
+static struct attribute_group pci_bridge_attr_group = {
+	.attrs = pci_bridge_attrs,
+	.is_visible = pci_bridge_attrs_are_visible,
+};
+
+static struct attribute_group pcie_dev_attr_group = {
+	.attrs = pcie_dev_attrs,
+	.is_visible = pcie_dev_attrs_are_visible,
+};
+
 static const struct attribute_group *pci_dev_attr_groups[] = {
 	&pci_dev_attr_group,
 	&pci_dev_hp_attr_group,
 #ifdef CONFIG_PCI_IOV
 	&sriov_dev_attr_group,
 #endif
+	&pci_bridge_attr_group,
+	&pcie_dev_attr_group,
 	NULL,
 };
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0b5302a9fdae..d88edf5c563b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -28,6 +28,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/pci_hotplug.h>
 #include <linux/vmalloc.h>
+#include <linux/pci-ats.h>
 #include <asm/setup.h>
 #include <asm/dma.h>
 #include <linux/aer.h>
@@ -455,7 +456,7 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev,
 	pci_bus_for_each_resource(bus, r, i) {
 		if (!r)
 			continue;
-		if (res->start && resource_contains(r, res)) {
+		if (resource_contains(r, res)) {
 
 			/*
 			 * If the window is prefetchable but the BAR is
@@ -1166,6 +1167,8 @@ void pci_restore_state(struct pci_dev *dev)
 
 	/* PCI Express register must be restored first */
 	pci_restore_pcie_state(dev);
+	pci_restore_pasid_state(dev);
+	pci_restore_pri_state(dev);
 	pci_restore_ats_state(dev);
 	pci_restore_vc_state(dev);
 
@@ -1966,12 +1969,13 @@ EXPORT_SYMBOL(pci_wake_from_d3);
 /**
  * pci_target_state - find an appropriate low power state for a given PCI dev
  * @dev: PCI device
+ * @wakeup: Whether or not wakeup functionality will be enabled for the device.
  *
  * Use underlying platform code to find a supported low power state for @dev.
  * If the platform can't manage @dev, return the deepest state from which it
  * can generate wake events, based on any available PME info.
  */
-static pci_power_t pci_target_state(struct pci_dev *dev)
+static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
 {
 	pci_power_t target_state = PCI_D3hot;
 
@@ -2008,7 +2012,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev)
 	if (dev->current_state == PCI_D3cold)
 		target_state = PCI_D3cold;
 
-	if (device_may_wakeup(&dev->dev)) {
+	if (wakeup) {
 		/*
 		 * Find the deepest state from which the device can generate
 		 * wake-up events, make it the target state and enable device
@@ -2034,13 +2038,14 @@ static pci_power_t pci_target_state(struct pci_dev *dev)
  */
 int pci_prepare_to_sleep(struct pci_dev *dev)
 {
-	pci_power_t target_state = pci_target_state(dev);
+	bool wakeup = device_may_wakeup(&dev->dev);
+	pci_power_t target_state = pci_target_state(dev, wakeup);
 	int error;
 
 	if (target_state == PCI_POWER_ERROR)
 		return -EIO;
 
-	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
+	pci_enable_wake(dev, target_state, wakeup);
 
 	error = pci_set_power_state(dev, target_state);
 
@@ -2073,9 +2078,10 @@ EXPORT_SYMBOL(pci_back_from_sleep);
  */
 int pci_finish_runtime_suspend(struct pci_dev *dev)
 {
-	pci_power_t target_state = pci_target_state(dev);
+	pci_power_t target_state;
 	int error;
 
+	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
 	if (target_state == PCI_POWER_ERROR)
 		return -EIO;
 
@@ -2111,8 +2117,8 @@ bool pci_dev_run_wake(struct pci_dev *dev)
 	if (!dev->pme_support)
 		return false;
 
-	/* PME-capable in principle, but not from the intended sleep state */
-	if (!pci_pme_capable(dev, pci_target_state(dev)))
+	/* PME-capable in principle, but not from the target power state */
+	if (!pci_pme_capable(dev, pci_target_state(dev, false)))
 		return false;
 
 	while (bus->parent) {
@@ -2147,9 +2153,10 @@ EXPORT_SYMBOL_GPL(pci_dev_run_wake);
 bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
 {
 	struct device *dev = &pci_dev->dev;
+	bool wakeup = device_may_wakeup(dev);
 
 	if (!pm_runtime_suspended(dev)
-	    || pci_target_state(pci_dev) != pci_dev->current_state
+	    || pci_target_state(pci_dev, wakeup) != pci_dev->current_state
 	    || platform_pci_need_resume(pci_dev)
 	    || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
 		return false;
@@ -2167,7 +2174,7 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
 	spin_lock_irq(&dev->power.lock);
 
 	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
-	    !device_may_wakeup(dev))
+	    !wakeup)
 		__pci_pme_active(pci_dev, false);
 
 	spin_unlock_irq(&dev->power.lock);
@@ -3715,46 +3722,6 @@ void pci_intx(struct pci_dev *pdev, int enable)
 }
 EXPORT_SYMBOL_GPL(pci_intx);
 
-/**
- * pci_intx_mask_supported - probe for INTx masking support
- * @dev: the PCI device to operate on
- *
- * Check if the device dev support INTx masking via the config space
- * command word.
- */
-bool pci_intx_mask_supported(struct pci_dev *dev)
-{
-	bool mask_supported = false;
-	u16 orig, new;
-
-	if (dev->broken_intx_masking)
-		return false;
-
-	pci_cfg_access_lock(dev);
-
-	pci_read_config_word(dev, PCI_COMMAND, &orig);
-	pci_write_config_word(dev, PCI_COMMAND,
-			      orig ^ PCI_COMMAND_INTX_DISABLE);
-	pci_read_config_word(dev, PCI_COMMAND, &new);
-
-	/*
-	 * There's no way to protect against hardware bugs or detect them
-	 * reliably, but as long as we know what the value should be, let's
-	 * go ahead and check it.
-	 */
-	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
-		dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
-			orig, new);
-	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
-		mask_supported = true;
-		pci_write_config_word(dev, PCI_COMMAND, orig);
-	}
-
-	pci_cfg_access_unlock(dev);
-	return mask_supported;
-}
-EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
-
 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
 {
 	struct pci_bus *bus = dev->bus;
@@ -3805,7 +3772,7 @@ done:
  * @dev: the PCI device to operate on
  *
  * Check if the device dev has its INTx line asserted, mask it and
- * return true in that case. False is returned if not interrupt was
+ * return true in that case. False is returned if no interrupt was
  * pending.
  */
 bool pci_check_and_mask_intx(struct pci_dev *dev)
@@ -4075,40 +4042,6 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
 	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
 }
 
-static int __pci_dev_reset(struct pci_dev *dev, int probe)
-{
-	int rc;
-
-	might_sleep();
-
-	rc = pci_dev_specific_reset(dev, probe);
-	if (rc != -ENOTTY)
-		goto done;
-
-	if (pcie_has_flr(dev)) {
-		if (!probe)
-			pcie_flr(dev);
-		rc = 0;
-		goto done;
-	}
-
-	rc = pci_af_flr(dev, probe);
-	if (rc != -ENOTTY)
-		goto done;
-
-	rc = pci_pm_reset(dev, probe);
-	if (rc != -ENOTTY)
-		goto done;
-
-	rc = pci_dev_reset_slot_function(dev, probe);
-	if (rc != -ENOTTY)
-		goto done;
-
-	rc = pci_parent_bus_reset(dev, probe);
-done:
-	return rc;
-}
-
 static void pci_dev_lock(struct pci_dev *dev)
 {
 	pci_cfg_access_lock(dev);
@@ -4134,26 +4067,18 @@ static void pci_dev_unlock(struct pci_dev *dev)
 	pci_cfg_access_unlock(dev);
 }
 
-/**
- * pci_reset_notify - notify device driver of reset
- * @dev: device to be notified of reset
- * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
- *           completed
- *
- * Must be called prior to device access being disabled and after device
- * access is restored.
- */
-static void pci_reset_notify(struct pci_dev *dev, bool prepare)
+static void pci_dev_save_and_disable(struct pci_dev *dev)
 {
 	const struct pci_error_handlers *err_handler =
 			dev->driver ? dev->driver->err_handler : NULL;
-	if (err_handler && err_handler->reset_notify)
-		err_handler->reset_notify(dev, prepare);
-}
 
-static void pci_dev_save_and_disable(struct pci_dev *dev)
-{
-	pci_reset_notify(dev, true);
+	/*
+	 * dev->driver->err_handler->reset_prepare() is protected against
+	 * races with ->remove() by the device lock, which must be held by
+	 * the caller.
+	 */
+	if (err_handler && err_handler->reset_prepare)
+		err_handler->reset_prepare(dev);
 
 	/*
 	 * Wake-up device prior to save.  PM registers default to D0 after
@@ -4175,23 +4100,18 @@ static void pci_dev_save_and_disable(struct pci_dev *dev)
 
 static void pci_dev_restore(struct pci_dev *dev)
 {
-	pci_restore_state(dev);
-	pci_reset_notify(dev, false);
-}
-
-static int pci_dev_reset(struct pci_dev *dev, int probe)
-{
-	int rc;
-
-	if (!probe)
-		pci_dev_lock(dev);
-
-	rc = __pci_dev_reset(dev, probe);
+	const struct pci_error_handlers *err_handler =
+			dev->driver ? dev->driver->err_handler : NULL;
 
-	if (!probe)
-		pci_dev_unlock(dev);
+	pci_restore_state(dev);
 
-	return rc;
+	/*
+	 * dev->driver->err_handler->reset_done() is protected against
+	 * races with ->remove() by the device lock, which must be held by
+	 * the caller.
+	 */
+	if (err_handler && err_handler->reset_done)
+		err_handler->reset_done(dev);
 }
 
 /**
@@ -4213,7 +4133,13 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
  */
 int __pci_reset_function(struct pci_dev *dev)
 {
-	return pci_dev_reset(dev, 0);
+	int ret;
+
+	pci_dev_lock(dev);
+	ret = __pci_reset_function_locked(dev);
+	pci_dev_unlock(dev);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(__pci_reset_function);
 
@@ -4238,7 +4164,27 @@ EXPORT_SYMBOL_GPL(__pci_reset_function);
  */
 int __pci_reset_function_locked(struct pci_dev *dev)
 {
-	return __pci_dev_reset(dev, 0);
+	int rc;
+
+	might_sleep();
+
+	rc = pci_dev_specific_reset(dev, 0);
+	if (rc != -ENOTTY)
+		return rc;
+	if (pcie_has_flr(dev)) {
+		pcie_flr(dev);
+		return 0;
+	}
+	rc = pci_af_flr(dev, 0);
+	if (rc != -ENOTTY)
+		return rc;
+	rc = pci_pm_reset(dev, 0);
+	if (rc != -ENOTTY)
+		return rc;
+	rc = pci_dev_reset_slot_function(dev, 0);
+	if (rc != -ENOTTY)
+		return rc;
+	return pci_parent_bus_reset(dev, 0);
 }
 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
 
@@ -4255,7 +4201,26 @@ EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
  */
 int pci_probe_reset_function(struct pci_dev *dev)
 {
-	return pci_dev_reset(dev, 1);
+	int rc;
+
+	might_sleep();
+
+	rc = pci_dev_specific_reset(dev, 1);
+	if (rc != -ENOTTY)
+		return rc;
+	if (pcie_has_flr(dev))
+		return 0;
+	rc = pci_af_flr(dev, 1);
+	if (rc != -ENOTTY)
+		return rc;
+	rc = pci_pm_reset(dev, 1);
+	if (rc != -ENOTTY)
+		return rc;
+	rc = pci_dev_reset_slot_function(dev, 1);
+	if (rc != -ENOTTY)
+		return rc;
+
+	return pci_parent_bus_reset(dev, 1);
 }
 
 /**
@@ -4278,15 +4243,17 @@ int pci_reset_function(struct pci_dev *dev)
 {
 	int rc;
 
-	rc = pci_dev_reset(dev, 1);
+	rc = pci_probe_reset_function(dev);
 	if (rc)
 		return rc;
 
+	pci_dev_lock(dev);
 	pci_dev_save_and_disable(dev);
 
-	rc = pci_dev_reset(dev, 0);
+	rc = __pci_reset_function_locked(dev);
 
 	pci_dev_restore(dev);
+	pci_dev_unlock(dev);
 
 	return rc;
 }
@@ -4302,20 +4269,18 @@ int pci_try_reset_function(struct pci_dev *dev)
 {
 	int rc;
 
-	rc = pci_dev_reset(dev, 1);
+	rc = pci_probe_reset_function(dev);
 	if (rc)
 		return rc;
 
-	pci_dev_save_and_disable(dev);
+	if (!pci_dev_trylock(dev))
+		return -EAGAIN;
 
-	if (pci_dev_trylock(dev)) {
-		rc = __pci_dev_reset(dev, 0);
-		pci_dev_unlock(dev);
-	} else
-		rc = -EAGAIN;
+	pci_dev_save_and_disable(dev);
+	rc = __pci_reset_function_locked(dev);
+	pci_dev_unlock(dev);
 
 	pci_dev_restore(dev);
-
 	return rc;
 }
 EXPORT_SYMBOL_GPL(pci_try_reset_function);
@@ -4465,7 +4430,9 @@ static void pci_bus_save_and_disable(struct pci_bus *bus)
 	struct pci_dev *dev;
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
+		pci_dev_lock(dev);
 		pci_dev_save_and_disable(dev);
+		pci_dev_unlock(dev);
 		if (dev->subordinate)
 			pci_bus_save_and_disable(dev->subordinate);
 	}
@@ -4480,7 +4447,9 @@ static void pci_bus_restore(struct pci_bus *bus)
 	struct pci_dev *dev;
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
+		pci_dev_lock(dev);
 		pci_dev_restore(dev);
+		pci_dev_unlock(dev);
 		if (dev->subordinate)
 			pci_bus_restore(dev->subordinate);
 	}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 240b2c0fed4b..03e3d0285aea 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -267,7 +267,6 @@ struct pci_sriov {
 	u16 driver_max_VFs;	/* max num VFs driver supports */
 	struct pci_dev *dev;	/* lowest numbered PF */
 	struct pci_dev *self;	/* this PF */
-	struct mutex lock;	/* lock for setting sriov_numvfs in sysfs */
 	resource_size_t barsz[PCI_SRIOV_NUM_BARS];	/* VF BAR size */
 	bool drivers_autoprobe;	/* auto probing of VFs by driver */
 };
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
index 77d2ca99d2ec..c39f32e42b4d 100644
--- a/drivers/pci/pcie/pcie-dpc.c
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -92,7 +92,7 @@ static irqreturn_t dpc_irq(int irq, void *context)
 	pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status);
 	pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID,
 			     &source);
-	if (!status)
+	if (!status || status == (u16)(~0))
 		return IRQ_NONE;
 
 	dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n",
@@ -144,7 +144,7 @@ static int dpc_probe(struct pcie_device *dev)
 
 	dpc->rp = (cap & PCI_EXP_DPC_CAP_RP_EXT);
 
-	ctl |= PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
+	ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
 	pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
 
 	dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 587aef36030d..4334fd5d7de9 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -13,10 +13,11 @@
 
 #define PCIE_PORT_DEVICE_MAXSERVICES   5
 /*
- * According to the PCI Express Base Specification 2.0, the indices of
- * the MSI-X table entries used by port services must not exceed 31
+ * The PCIe Capability Interrupt Message Number (PCIe r3.1, sec 7.8.2) must
+ * be one of the first 32 MSI-X entries.  Per PCI r3.0, sec 6.8.3.1, MSI
+ * supports a maximum of 32 vectors per function.
  */
-#define PCIE_PORT_MAX_MSIX_ENTRIES	32
+#define PCIE_PORT_MAX_MSI_ENTRIES	32
 
 #define get_descriptor_id(type, service) (((type - 4) << 8) | service)
 
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index cea504f6f478..313a21df1692 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -44,14 +44,15 @@ static void release_pcie_device(struct device *dev)
 }
 
 /**
- * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
+ * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode
+ * for given port
  * @dev: PCI Express port to handle
  * @irqs: Array of interrupt vectors to populate
  * @mask: Bitmask of port capabilities returned by get_port_device_capability()
  *
  * Return value: 0 on success, error code on failure
  */
-static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
+static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
 {
 	int nr_entries, entry, nvec = 0;
 
@@ -61,8 +62,8 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
 	 * equal to the number of entries this port actually uses, we'll happily
 	 * go through without any tricks.
 	 */
-	nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSIX_ENTRIES,
-			PCI_IRQ_MSIX);
+	nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
+			PCI_IRQ_MSIX | PCI_IRQ_MSI);
 	if (nr_entries < 0)
 		return nr_entries;
 
@@ -70,14 +71,19 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
 		u16 reg16;
 
 		/*
-		 * The code below follows the PCI Express Base Specification 2.0
-		 * stating in Section 6.1.6 that "PME and Hot-Plug Event
-		 * interrupts (when both are implemented) always share the same
-		 * MSI or MSI-X vector, as indicated by the Interrupt Message
-		 * Number field in the PCI Express Capabilities register", where
-		 * according to Section 7.8.2 of the specification "For MSI-X,
-		 * the value in this field indicates which MSI-X Table entry is
-		 * used to generate the interrupt message."
+		 * Per PCIe r3.1, sec 6.1.6, "PME and Hot-Plug Event
+		 * interrupts (when both are implemented) always share the
+		 * same MSI or MSI-X vector, as indicated by the Interrupt
+		 * Message Number field in the PCI Express Capabilities
+		 * register".
+		 *
+		 * Per sec 7.8.2, "For MSI, the [Interrupt Message Number]
+		 * indicates the offset between the base Message Data and
+		 * the interrupt message that is generated."
+		 *
+		 * "For MSI-X, the [Interrupt Message Number] indicates
+		 * which MSI-X Table entry is used to generate the
+		 * interrupt message."
 		 */
 		pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
 		entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
@@ -94,13 +100,17 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
 		u32 reg32, pos;
 
 		/*
-		 * The code below follows Section 7.10.10 of the PCI Express
-		 * Base Specification 2.0 stating that bits 31-27 of the Root
-		 * Error Status Register contain a value indicating which of the
-		 * MSI/MSI-X vectors assigned to the port is going to be used
-		 * for AER, where "For MSI-X, the value in this register
-		 * indicates which MSI-X Table entry is used to generate the
-		 * interrupt message."
+		 * Per PCIe r3.1, sec 7.10.10, the Advanced Error Interrupt
+		 * Message Number in the Root Error Status register
+		 * indicates which MSI/MSI-X vector is used for AER.
+		 *
+		 * "For MSI, the [Advanced Error Interrupt Message Number]
+		 * indicates the offset between the base Message Data and
+		 * the interrupt message that is generated."
+		 *
+		 * "For MSI-X, the [Advanced Error Interrupt Message
+		 * Number] indicates which MSI-X Table entry is used to
+		 * generate the interrupt message."
 		 */
 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
@@ -113,6 +123,33 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
 		nvec = max(nvec, entry + 1);
 	}
 
+	if (mask & PCIE_PORT_SERVICE_DPC) {
+		u16 reg16, pos;
+
+		/*
+		 * Per PCIe r4.0 (v0.9), sec 7.9.15.2, the DPC Interrupt
+		 * Message Number in the DPC Capability register indicates
+		 * which MSI/MSI-X vector is used for DPC.
+		 *
+		 * "For MSI, the [DPC Interrupt Message Number] indicates
+		 * the offset between the base Message Data and the
+		 * interrupt message that is generated."
+		 *
+		 * "For MSI-X, the [DPC Interrupt Message Number] indicates
+		 * which MSI-X Table entry is used to generate the
+		 * interrupt message."
+		 */
+		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
+		pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP, &reg16);
+		entry = reg16 & 0x1f;
+		if (entry >= nr_entries)
+			goto out_free_irqs;
+
+		irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, entry);
+
+		nvec = max(nvec, entry + 1);
+	}
+
 	/*
 	 * If nvec is equal to the allocated number of entries, we can just use
 	 * what we have.  Otherwise, the port has some extra entries not for the
@@ -124,7 +161,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
 
 		/* Now allocate the MSI-X vectors for real */
 		nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
-				PCI_IRQ_MSIX);
+				PCI_IRQ_MSIX | PCI_IRQ_MSI);
 		if (nr_entries < 0)
 			return nr_entries;
 	}
@@ -146,26 +183,29 @@ out_free_irqs:
  */
 static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
 {
-	unsigned flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
 	int ret, i;
 
 	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
 		irqs[i] = -1;
 
 	/*
-	 * If MSI cannot be used for PCIe PME or hotplug, we have to use
-	 * INTx or other interrupts, e.g. system shared interrupt.
+	 * If we support PME or hotplug, but we can't use MSI/MSI-X for
+	 * them, we have to fall back to INTx or other interrupts, e.g., a
+	 * system shared interrupt.
 	 */
-	if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) ||
-	    ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) {
-		flags &= ~PCI_IRQ_MSI;
-	} else {
-		/* Try to use MSI-X if supported */
-		if (!pcie_port_enable_msix(dev, irqs, mask))
-			return 0;
-	}
+	if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi())
+		goto legacy_irq;
+
+	if ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())
+		goto legacy_irq;
+
+	/* Try to use MSI-X or MSI if supported */
+	if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0)
+		return 0;
 
-	ret = pci_alloc_irq_vectors(dev, 1, 1, flags);
+legacy_irq:
+	/* fall back to legacy IRQ */
+	ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
 	if (ret < 0)
 		return -ENODEV;
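
Once pcie_init_service_irqs() has filled in the per-service irqs[] array, the port driver is assumed to copy each entry into the pcie_device it registers, and each service driver then requests that vector. A sketch of the consumer side (the driver name and handler are hypothetical; struct pcie_device is assumed to come from pcieport_if.h in this era):

#include <linux/interrupt.h>
#include <linux/pcieport_if.h>

static irqreturn_t example_service_irq(int irq, void *context)
{
	struct pcie_device *srv = context;

	dev_dbg(&srv->device, "service interrupt\n");
	return IRQ_HANDLED;
}

static int example_service_probe(struct pcie_device *srv)
{
	/* srv->irq was copied from irqs[PCIE_PORT_SERVICE_*_SHIFT] */
	return request_irq(srv->irq, example_service_irq, IRQF_SHARED,
			   "example-pcie-service", srv);
}
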
 
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 19c8950c6c38..c31310db0404 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -510,16 +510,18 @@ static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
 	return b;
 }
 
-static void pci_release_host_bridge_dev(struct device *dev)
+static void devm_pci_release_host_bridge_dev(struct device *dev)
 {
 	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
 
 	if (bridge->release_fn)
 		bridge->release_fn(bridge);
+}
 
-	pci_free_resource_list(&bridge->windows);
-
-	kfree(bridge);
+static void pci_release_host_bridge_dev(struct device *dev)
+{
+	devm_pci_release_host_bridge_dev(dev);
+	pci_free_host_bridge(to_pci_host_bridge(dev));
 }
 
 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
@@ -531,11 +533,36 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
 		return NULL;
 
 	INIT_LIST_HEAD(&bridge->windows);
+	bridge->dev.release = pci_release_host_bridge_dev;
 
 	return bridge;
 }
 EXPORT_SYMBOL(pci_alloc_host_bridge);
 
+struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
+						   size_t priv)
+{
+	struct pci_host_bridge *bridge;
+
+	bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
+	if (!bridge)
+		return NULL;
+
+	INIT_LIST_HEAD(&bridge->windows);
+	bridge->dev.release = devm_pci_release_host_bridge_dev;
+
+	return bridge;
+}
+EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
+
+void pci_free_host_bridge(struct pci_host_bridge *bridge)
+{
+	pci_free_resource_list(&bridge->windows);
+
+	kfree(bridge);
+}
+EXPORT_SYMBOL(pci_free_host_bridge);
+
 static const unsigned char pcix_bus_speed[] = {
 	PCI_SPEED_UNKNOWN,		/* 0 */
 	PCI_SPEED_66MHz_PCIX,		/* 1 */
@@ -719,7 +746,7 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
 	dev_set_msi_domain(&bus->dev, d);
 }
 
-int pci_register_host_bridge(struct pci_host_bridge *bridge)
+static int pci_register_host_bridge(struct pci_host_bridge *bridge)
 {
 	struct device *parent = bridge->dev.parent;
 	struct resource_entry *window, *n;
@@ -834,7 +861,6 @@ free:
 	kfree(bus);
 	return err;
 }
-EXPORT_SYMBOL(pci_register_host_bridge);
 
 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
 					   struct pci_dev *bridge, int busnr)
@@ -1330,6 +1356,34 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev)
 }
 
 /**
+ * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
+ * @dev: PCI device
+ *
+ * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev.  Check this
+ * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
+ */
+static int pci_intx_mask_broken(struct pci_dev *dev)
+{
+	u16 orig, toggle, new;
+
+	pci_read_config_word(dev, PCI_COMMAND, &orig);
+	toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
+	pci_write_config_word(dev, PCI_COMMAND, toggle);
+	pci_read_config_word(dev, PCI_COMMAND, &new);
+
+	pci_write_config_word(dev, PCI_COMMAND, orig);
+
+	/*
+	 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
+	 * r2.3, so strictly speaking, a device is not *broken* if it's not
+	 * writable.  But we'll live with the misnomer for now.
+	 */
+	if (new != toggle)
+		return 1;
+	return 0;
+}
+
+/**
  * pci_setup_device - fill in class and map information of a device
  * @dev: the device structure to fill
  *
@@ -1399,6 +1453,8 @@ int pci_setup_device(struct pci_dev *dev)
 		}
 	}
 
+	dev->broken_intx_masking = pci_intx_mask_broken(dev);
+
 	switch (dev->hdr_type) {		    /* header type */
 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
 		if (class == PCI_CLASS_BRIDGE_PCI)
@@ -1674,6 +1730,11 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
 	/* Initialize Advanced Error Capabilities and Control Register */
 	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
 	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
+	/* Don't enable ECRC generation or checking if unsupported */
+	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
+		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
+	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
+		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
 	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
 
 	/*
@@ -2298,9 +2359,8 @@ void __weak pcibios_remove_bus(struct pci_bus *bus)
 {
 }
 
-static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
-		int bus, struct pci_ops *ops, void *sysdata,
-		struct list_head *resources, struct msi_controller *msi)
+struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
+		struct pci_ops *ops, void *sysdata, struct list_head *resources)
 {
 	int error;
 	struct pci_host_bridge *bridge;
@@ -2310,13 +2370,11 @@ static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
 		return NULL;
 
 	bridge->dev.parent = parent;
-	bridge->dev.release = pci_release_host_bridge_dev;
 
 	list_splice_init(resources, &bridge->windows);
 	bridge->sysdata = sysdata;
 	bridge->busnr = bus;
 	bridge->ops = ops;
-	bridge->msi = msi;
 
 	error = pci_register_host_bridge(bridge);
 	if (error < 0)
@@ -2328,13 +2386,6 @@ err_out:
 	kfree(bridge);
 	return NULL;
 }
-
-struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
-		struct pci_ops *ops, void *sysdata, struct list_head *resources)
-{
-	return pci_create_root_bus_msi(parent, bus, ops, sysdata, resources,
-				       NULL);
-}
 EXPORT_SYMBOL_GPL(pci_create_root_bus);
 
 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
@@ -2400,24 +2451,28 @@ void pci_bus_release_busn_res(struct pci_bus *b)
 			res, ret ? "can not be" : "is");
 }
 
-struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
-		struct pci_ops *ops, void *sysdata,
-		struct list_head *resources, struct msi_controller *msi)
+int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
 {
 	struct resource_entry *window;
 	bool found = false;
 	struct pci_bus *b;
-	int max;
+	int max, bus, ret;
 
-	resource_list_for_each_entry(window, resources)
+	if (!bridge)
+		return -EINVAL;
+
+	resource_list_for_each_entry(window, &bridge->windows)
 		if (window->res->flags & IORESOURCE_BUS) {
 			found = true;
 			break;
 		}
 
-	b = pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, msi);
-	if (!b)
-		return NULL;
+	ret = pci_register_host_bridge(bridge);
+	if (ret < 0)
+		return ret;
+
+	b = bridge->bus;
+	bus = bridge->busnr;
 
 	if (!found) {
 		dev_info(&b->dev,
@@ -2431,14 +2486,41 @@ struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
 	if (!found)
 		pci_bus_update_busn_res_end(b, max);
 
-	return b;
+	return 0;
 }
+EXPORT_SYMBOL(pci_scan_root_bus_bridge);
 
 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
 {
-	return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
-				     NULL);
+	struct resource_entry *window;
+	bool found = false;
+	struct pci_bus *b;
+	int max;
+
+	resource_list_for_each_entry(window, resources)
+		if (window->res->flags & IORESOURCE_BUS) {
+			found = true;
+			break;
+		}
+
+	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
+	if (!b)
+		return NULL;
+
+	if (!found) {
+		dev_info(&b->dev,
+		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
+			bus);
+		pci_bus_insert_busn_res(b, bus, 255);
+	}
+
+	max = pci_scan_child_bus(b);
+
+	if (!found)
+		pci_bus_update_busn_res_end(b, max);
+
+	return b;
 }
 EXPORT_SYMBOL(pci_scan_root_bus);
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 085fb787aa9e..6967c6b4cf6b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -304,7 +304,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
 {
 	int i;
 
-	for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
 		struct resource *r = &dev->resource[i];
 
 		if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
@@ -1684,6 +1684,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2609, quirk_intel_pcie_pm);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x260a, quirk_intel_pcie_pm);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x260b, quirk_intel_pcie_pm);
 
+static void quirk_radeon_pm(struct pci_dev *dev)
+{
+	if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+	    dev->subsystem_device == 0x00e2) {
+		if (dev->d3_delay < 20) {
+			dev->d3_delay = 20;
+			dev_info(&dev->dev, "extending delay after power-on from D3 to %d msec\n",
+				 dev->d3_delay);
+		}
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
+
 #ifdef CONFIG_X86_IO_APIC
 static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
 {
@@ -3236,6 +3249,10 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
 			quirk_broken_intx_masking);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
 			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b,
+			quirk_broken_intx_masking);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
 			quirk_broken_intx_masking);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index 95c225be49d1..81eda3d93a5d 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -15,6 +15,7 @@
 #include <linux/errno.h>
 #include <linux/ioport.h>
 #include <linux/cache.h>
+#include "pci.h"
 
 void __weak pcibios_update_irq(struct pci_dev *dev, int irq)
 {
@@ -22,12 +23,17 @@ void __weak pcibios_update_irq(struct pci_dev *dev, int irq)
 	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
 }
 
-static void pdev_fixup_irq(struct pci_dev *dev,
-			   u8 (*swizzle)(struct pci_dev *, u8 *),
-			   int (*map_irq)(const struct pci_dev *, u8, u8))
+void pci_assign_irq(struct pci_dev *dev)
 {
-	u8 pin, slot;
+	u8 pin;
+	u8 slot = -1;
 	int irq = 0;
+	struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus);
+
+	if (!(hbrg->map_irq)) {
+		dev_dbg(&dev->dev, "runtime IRQ mapping not provided by arch\n");
+		return;
+	}
 
 	/* If this device is not on the primary bus, we need to figure out
 	   which interrupt pin it will come in on.   We know which slot it
@@ -40,17 +46,22 @@ static void pdev_fixup_irq(struct pci_dev *dev,
 	if (pin > 4)
 		pin = 1;
 
-	if (pin != 0) {
+	if (pin) {
 		/* Follow the chain of bridges, swizzling as we go.  */
-		slot = (*swizzle)(dev, &pin);
+		if (hbrg->swizzle_irq)
+			slot = (*(hbrg->swizzle_irq))(dev, &pin);
 
-		irq = (*map_irq)(dev, slot, pin);
+		/*
+		 * If a swizzling function is not used, map_irq must
+		 * ignore the slot argument.
+		 */
+		irq = (*(hbrg->map_irq))(dev, slot, pin);
 		if (irq == -1)
 			irq = 0;
 	}
 	dev->irq = irq;
 
-	dev_dbg(&dev->dev, "fixup irq: got %d\n", dev->irq);
+	dev_dbg(&dev->dev, "assign IRQ: got %d\n", dev->irq);
 
 	/* Always tell the device, so the driver knows what is
 	   the real IRQ to use; the device does not use it. */
@@ -60,9 +71,23 @@ static void pdev_fixup_irq(struct pci_dev *dev,
 void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *),
 		    int (*map_irq)(const struct pci_dev *, u8, u8))
 {
+	/*
+	 * Implement pci_fixup_irqs() through pci_assign_irq().
+	 * This code should be removed eventually; it is a wrapper
+	 * around the pci_assign_irq() interface that keeps the current
+	 * pci_fixup_irqs() behaviour unchanged for architecture code
+	 * still relying on its interface.
+	 */
 	struct pci_dev *dev = NULL;
+	struct pci_host_bridge *hbrg = NULL;
 
-	for_each_pci_dev(dev)
-		pdev_fixup_irq(dev, swizzle, map_irq);
+	for_each_pci_dev(dev) {
+		hbrg = pci_find_host_bridge(dev->bus);
+		hbrg->swizzle_irq = swizzle;
+		hbrg->map_irq = map_irq;
+		pci_assign_irq(dev);
+		hbrg->swizzle_irq = NULL;
+		hbrg->map_irq = NULL;
+	}
 }
 EXPORT_SYMBOL_GPL(pci_fixup_irqs);
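
For a bridge driver that still routes legacy INTx, the map_irq hook typically consults a board-specific table; a return value of -1 becomes "no IRQ" in pci_assign_irq() above. A hypothetical callback, paired with the standard swizzle:

#include <linux/pci.h>

static int example_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	/* rows: device/slot on the root bus, columns: INTA..INTD */
	static const int irq_tab[4][4] = {
		{ 29, 30, 31, 32 },
		{ 30, 31, 32, 29 },
		{ 31, 32, 29, 30 },
		{ 32, 29, 30, 31 },
	};

	if (slot >= 4 || pin < 1 || pin > 4)
		return -1;	/* pci_assign_irq() treats -1 as "no IRQ" */

	return irq_tab[slot][pin - 1];
}

/* at probe time, alongside the bridge setup sketched earlier:	*/
/*	bridge->swizzle_irq = pci_common_swizzle;		*/
/*	bridge->map_irq = example_map_irq;			*/
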
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index f6a63406c76e..af81b2dec42e 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -120,6 +120,13 @@ struct sw_event_regs {
 	u32 reserved16[4];
 } __packed;
 
+enum {
+	SWITCHTEC_CFG0_RUNNING = 0x04,
+	SWITCHTEC_CFG1_RUNNING = 0x05,
+	SWITCHTEC_IMG0_RUNNING = 0x03,
+	SWITCHTEC_IMG1_RUNNING = 0x07,
+};
+
 struct sys_info_regs {
 	u32 device_id;
 	u32 device_version;
@@ -129,7 +136,9 @@ struct sys_info_regs {
 	u32 table_format_version;
 	u32 partition_id;
 	u32 cfg_file_fmt_version;
-	u32 reserved2[58];
+	u16 cfg_running;
+	u16 img_running;
+	u32 reserved2[57];
 	char vendor_id[8];
 	char product_id[16];
 	char product_revision[4];
@@ -807,6 +816,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
 {
 	struct switchtec_ioctl_flash_part_info info = {0};
 	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
+	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
 	u32 active_addr = -1;
 
 	if (copy_from_user(&info, uinfo, sizeof(info)))
@@ -816,18 +826,26 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
 	case SWITCHTEC_IOCTL_PART_CFG0:
 		active_addr = ioread32(&fi->active_cfg);
 		set_fw_info_part(&info, &fi->cfg0);
+		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
+			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
 		break;
 	case SWITCHTEC_IOCTL_PART_CFG1:
 		active_addr = ioread32(&fi->active_cfg);
 		set_fw_info_part(&info, &fi->cfg1);
+		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
+			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
 		break;
 	case SWITCHTEC_IOCTL_PART_IMG0:
 		active_addr = ioread32(&fi->active_img);
 		set_fw_info_part(&info, &fi->img0);
+		if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
+			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
 		break;
 	case SWITCHTEC_IOCTL_PART_IMG1:
 		active_addr = ioread32(&fi->active_img);
 		set_fw_info_part(&info, &fi->img1);
+		if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
+			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
 		break;
 	case SWITCHTEC_IOCTL_PART_NVLOG:
 		set_fw_info_part(&info, &fi->nvlog);
@@ -861,7 +879,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
 	}
 
 	if (info.address == active_addr)
-		info.active = 1;
+		info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;
 
 	if (copy_to_user(uinfo, &info, sizeof(info)))
 		return -EFAULT;
@@ -1540,6 +1558,24 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
 	SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
 	SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
 	SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
+	SWITCHTEC_PCI_DEVICE(0x8551),  //PAX 24XG3
+	SWITCHTEC_PCI_DEVICE(0x8552),  //PAX 32XG3
+	SWITCHTEC_PCI_DEVICE(0x8553),  //PAX 48XG3
+	SWITCHTEC_PCI_DEVICE(0x8554),  //PAX 64XG3
+	SWITCHTEC_PCI_DEVICE(0x8555),  //PAX 80XG3
+	SWITCHTEC_PCI_DEVICE(0x8556),  //PAX 96XG3
+	SWITCHTEC_PCI_DEVICE(0x8561),  //PFXL 24XG3
+	SWITCHTEC_PCI_DEVICE(0x8562),  //PFXL 32XG3
+	SWITCHTEC_PCI_DEVICE(0x8563),  //PFXL 48XG3
+	SWITCHTEC_PCI_DEVICE(0x8564),  //PFXL 64XG3
+	SWITCHTEC_PCI_DEVICE(0x8565),  //PFXL 80XG3
+	SWITCHTEC_PCI_DEVICE(0x8566),  //PFXL 96XG3
+	SWITCHTEC_PCI_DEVICE(0x8571),  //PFXI 24XG3
+	SWITCHTEC_PCI_DEVICE(0x8572),  //PFXI 32XG3
+	SWITCHTEC_PCI_DEVICE(0x8573),  //PFXI 48XG3
+	SWITCHTEC_PCI_DEVICE(0x8574),  //PFXI 64XG3
+	SWITCHTEC_PCI_DEVICE(0x8575),  //PFXI 80XG3
+	SWITCHTEC_PCI_DEVICE(0x8576),  //PFXI 96XG3
 	{0}
 };
 MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
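
With "active" now a bitmask, user space can distinguish the partition that will boot next (ACTIVE) from the one currently executing (RUNNING). A hypothetical user-space check; the ioctl and field names are assumed from the switchtec uapi header of this era and are not guaranteed verbatim:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/switchtec_ioctl.h>

int main(void)
{
	struct switchtec_ioctl_flash_part_info info = {
		.flash_partition = SWITCHTEC_IOCTL_PART_IMG0,	/* assumed field name */
	};
	int fd = open("/dev/switchtec0", O_RDWR);

	if (fd < 0 || ioctl(fd, SWITCHTEC_IOCTL_FLASH_PART_INFO, &info))
		return 1;

	/* "active" is a bitmask rather than a plain 0/1 flag */
	printf("IMG0:%s%s\n",
	       info.active & SWITCHTEC_IOCTL_PART_ACTIVE ? " active" : "",
	       info.active & SWITCHTEC_IOCTL_PART_RUNNING ? " running" : "");
	return 0;
}
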