 Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt | 35
 Documentation/devicetree/bindings/display/msm/gpu.txt | 38
 Documentation/gpu/drm-mm.rst | 15
 MAINTAINERS | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 33
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3
 drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 54
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 16
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 13
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 12
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 9
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 9
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 20
 drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 29
 drivers/gpu/drm/amd/amdgpu/cik.c | 4
 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 216
 drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 68
 drivers/gpu/drm/amd/amdgpu/si.c | 90
 drivers/gpu/drm/amd/amdgpu/si_enums.h | 4
 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 128
 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 24
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 21
 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 451
 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 17
 drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h | 1
 drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | 74
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | 23
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 22
 drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 2
 drivers/gpu/drm/arc/arcpgu_drv.c | 3
 drivers/gpu/drm/arm/hdlcd_drv.c | 2
 drivers/gpu/drm/arm/malidp_drv.c | 2
 drivers/gpu/drm/armada/armada_fbdev.c | 2
 drivers/gpu/drm/armada/armada_gem.c | 4
 drivers/gpu/drm/ast/ast_fb.c | 3
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 1
 drivers/gpu/drm/bochs/bochs_drv.c | 10
 drivers/gpu/drm/bochs/bochs_fbdev.c | 3
 drivers/gpu/drm/bridge/sil-sii8620.c | 949
 drivers/gpu/drm/bridge/sil-sii8620.h | 50
 drivers/gpu/drm/cirrus/cirrus_fbdev.c | 2
 drivers/gpu/drm/drm_atomic.c | 5
 drivers/gpu/drm/drm_atomic_helper.c | 2
 drivers/gpu/drm/drm_color_mgmt.c | 24
 drivers/gpu/drm/drm_crtc_internal.h | 3
 drivers/gpu/drm/drm_drv.c | 65
 drivers/gpu/drm/drm_edid.c | 8
 drivers/gpu/drm/drm_fb_cma_helper.c | 15
 drivers/gpu/drm/drm_fb_helper.c | 13
 drivers/gpu/drm/drm_mm.c | 488
 drivers/gpu/drm/drm_modes.c | 8
 drivers/gpu/drm/drm_vma_manager.c | 3
 drivers/gpu/drm/etnaviv/Makefile | 1
 drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 14
 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c | 6
 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c | 153
 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h | 58
 drivers/gpu/drm/etnaviv/etnaviv_drv.c | 5
 drivers/gpu/drm/etnaviv/etnaviv_dump.c | 6
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 8
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 95
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 28
 drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 2
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 6
 drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 69
 drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 10
 drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 92
 drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 5
 drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2
 drivers/gpu/drm/exynos/exynos_drm_g2d.c | 2
 drivers/gpu/drm/exynos/exynos_hdmi.c | 80
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 2
 drivers/gpu/drm/fsl-dcu/fsl_tcon.c | 12
 drivers/gpu/drm/gma500/framebuffer.c | 2
 drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c | 3
 drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 3
 drivers/gpu/drm/i915/i915_gem.c | 10
 drivers/gpu/drm/i915/i915_gem_evict.c | 9
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 5
 drivers/gpu/drm/i915/i915_gem_gtt.c | 39
 drivers/gpu/drm/i915/i915_gem_stolen.c | 6
 drivers/gpu/drm/i915/intel_fbdev.c | 3
 drivers/gpu/drm/imx/imx-drm-core.c | 3
 drivers/gpu/drm/meson/Makefile | 6
 drivers/gpu/drm/meson/meson_drv.c | 4
 drivers/gpu/drm/mgag200/mgag200_fb.c | 2
 drivers/gpu/drm/msm/Kconfig | 7
 drivers/gpu/drm/msm/Makefile | 2
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 21
 drivers/gpu/drm/msm/adreno/adreno_device.c | 62
 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 1
 drivers/gpu/drm/msm/adreno/adreno_gpu.h | 4
 drivers/gpu/drm/msm/dsi/dsi.c | 18
 drivers/gpu/drm/msm/dsi/dsi.h | 51
 drivers/gpu/drm/msm/dsi/dsi.xml.h | 269
 drivers/gpu/drm/msm/dsi/dsi_cfg.c | 25
 drivers/gpu/drm/msm/dsi/dsi_cfg.h | 1
 drivers/gpu/drm/msm/dsi/dsi_host.c | 97
 drivers/gpu/drm/msm/dsi/dsi_manager.c | 254
 drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 239
 drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 20
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c | 169
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 5
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 6
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 5
 drivers/gpu/drm/msm/dsi/pll/dsi_pll.c | 12
 drivers/gpu/drm/msm/dsi/pll/dsi_pll.h | 11
 drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c | 1104
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 28
 drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 48
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 10
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 3
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 135
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 73
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 14
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | 4
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 77
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 123
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 45
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c | 8
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 181
 drivers/gpu/drm/msm/mdp/mdp_kms.h | 1
 drivers/gpu/drm/msm/msm_atomic.c | 26
 drivers/gpu/drm/msm/msm_drv.c | 20
 drivers/gpu/drm/msm/msm_drv.h | 12
 drivers/gpu/drm/msm/msm_fbdev.c | 3
 drivers/gpu/drm/msm/msm_gem.c | 3
 drivers/gpu/drm/msm/msm_gem_submit.c | 13
 drivers/gpu/drm/msm/msm_gem_vma.c | 3
 drivers/gpu/drm/msm/msm_gpu.c | 7
 drivers/gpu/drm/msm/msm_iommu.c | 7
 drivers/gpu/drm/msm/msm_kms.h | 3
 drivers/gpu/drm/msm/msm_mmu.h | 9
 drivers/gpu/drm/mxsfb/mxsfb_drv.c | 2
 drivers/gpu/drm/nouveau/nouveau_fbcon.c | 3
 drivers/gpu/drm/omapdrm/omap_fbdev.c | 3
 drivers/gpu/drm/qxl/qxl_debugfs.c | 6
 drivers/gpu/drm/qxl/qxl_display.c | 32
 drivers/gpu/drm/qxl/qxl_drv.c | 30
 drivers/gpu/drm/qxl/qxl_drv.h | 8
 drivers/gpu/drm/qxl/qxl_fb.c | 11
 drivers/gpu/drm/qxl/qxl_ioctl.c | 4
 drivers/gpu/drm/qxl/qxl_irq.c | 2
 drivers/gpu/drm/qxl/qxl_kms.c | 13
 drivers/gpu/drm/qxl/qxl_object.c | 18
 drivers/gpu/drm/qxl/qxl_object.h | 8
 drivers/gpu/drm/qxl/qxl_ttm.c | 2
 drivers/gpu/drm/radeon/radeon_bios.c | 4
 drivers/gpu/drm/radeon/radeon_fb.c | 1
 drivers/gpu/drm/radeon/vce_v1_0.c | 2
 drivers/gpu/drm/rcar-du/rcar_du_kms.c | 2
 drivers/gpu/drm/rockchip/Kconfig | 10
 drivers/gpu/drm/rockchip/Makefile | 2
 drivers/gpu/drm/rockchip/cdn-dp-core.c | 1260
 drivers/gpu/drm/rockchip/cdn-dp-core.h | 112
 drivers/gpu/drm/rockchip/cdn-dp-reg.c | 979
 drivers/gpu/drm/rockchip/cdn-dp-reg.h | 483
 drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 2
 drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 5
 drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 17
 drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 9
 drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 2
 drivers/gpu/drm/selftests/drm_mm_selftests.h | 1
 drivers/gpu/drm/selftests/test-drm_mm.c | 158
 drivers/gpu/drm/sis/sis_mm.c | 6
 drivers/gpu/drm/sti/sti_drv.c | 2
 drivers/gpu/drm/sun4i/sun4i_framebuffer.c | 4
 drivers/gpu/drm/tegra/fb.c | 2
 drivers/gpu/drm/tegra/gem.c | 4
 drivers/gpu/drm/tilcdc/tilcdc_drv.c | 3
 drivers/gpu/drm/ttm/ttm_bo_manager.c | 18
 drivers/gpu/drm/udl/udl_fb.c | 3
 drivers/gpu/drm/vc4/Kconfig | 2
 drivers/gpu/drm/vc4/Makefile | 1
 drivers/gpu/drm/vc4/vc4_crtc.c | 35
 drivers/gpu/drm/vc4/vc4_debugfs.c | 1
 drivers/gpu/drm/vc4/vc4_drv.c | 1
 drivers/gpu/drm/vc4/vc4_drv.h | 5
 drivers/gpu/drm/vc4/vc4_dsi.c | 1725
 drivers/gpu/drm/vc4/vc4_hvs.c | 17
 drivers/gpu/drm/vc4/vc4_kms.c | 1
 drivers/gpu/drm/vc4/vc4_plane.c | 6
 drivers/gpu/drm/vc4/vc4_regs.h | 5
 drivers/gpu/drm/via/via_mm.c | 4
 drivers/gpu/drm/virtio/virtgpu_fb.c | 1
 drivers/gpu/drm/virtio/virtgpu_kms.c | 6
 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 10
 drivers/gpu/drm/zte/zx_drm_drv.c | 2
 include/drm/bridge/mhl.h | 85
 include/drm/drm_color_mgmt.h | 27
 include/drm/drm_drv.h | 13
 include/drm/drm_fb_cma_helper.h | 7
 include/drm/drm_fb_helper.h | 5
 include/drm/drm_mm.h | 184
 include/video/exynos5433_decon.h | 2
198 files changed, 10288 insertions, 2180 deletions
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
index e2768703ac2b..34c7fddcea39 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
+++ b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
@@ -56,6 +56,18 @@ Required properties for V3D:
 - interrupts:	The interrupt number
 		  See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
 
+Required properties for DSI:
+- compatible:	Should be "brcm,bcm2835-dsi0" or "brcm,bcm2835-dsi1"
+- reg:		Physical base address and length of the DSI block's registers
+- interrupts:	The interrupt number
+		  See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+- clocks:	a) phy: The DSI PLL clock feeding the DSI analog PHY
+		b) escape: The DSI ESC clock from CPRMAN
+		c) pixel: The DSI pixel clock from CPRMAN
+- clock-output-names:
+		The 3 clocks output from the DSI analog PHY: dsi[01]_byte,
+		dsi[01]_ddr2, and dsi[01]_ddr
+
 [1] Documentation/devicetree/bindings/media/video-interfaces.txt
 
 Example:
@@ -99,6 +111,29 @@ dpi: dpi@7e208000 {
 	};
 };
 
+dsi1: dsi@7e700000 {
+	compatible = "brcm,bcm2835-dsi1";
+	reg = <0x7e700000 0x8c>;
+	interrupts = <2 12>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	#clock-cells = <1>;
+
+	clocks = <&clocks BCM2835_PLLD_DSI1>,
+		 <&clocks BCM2835_CLOCK_DSI1E>,
+		 <&clocks BCM2835_CLOCK_DSI1P>;
+	clock-names = "phy", "escape", "pixel";
+
+	clock-output-names = "dsi1_byte", "dsi1_ddr2", "dsi1_ddr";
+
+	pitouchscreen: panel@0 {
+		compatible = "raspberrypi,touchscreen";
+		reg = <0>;
+
+		<...>
+	};
+};
+
 vec: vec@7e806000 {
 	compatible = "brcm,bcm2835-vec";
 	reg = <0x7e806000 0x1000>;
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index 67d0a58dbb77..43fac0fe09bb 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -1,23 +1,19 @@
 Qualcomm adreno/snapdragon GPU
 
 Required properties:
-- compatible: "qcom,adreno-3xx"
+- compatible: "qcom,adreno-XYZ.W", "qcom,adreno"
+    for example: "qcom,adreno-306.0", "qcom,adreno"
+  Note that you need to list the less specific "qcom,adreno" (since this
+  is what the device is matched on), in addition to the more specific
+  entry with the chip-id.
 - reg: Physical base address and length of the controller's registers.
 - interrupts: The interrupt signal from the gpu.
 - clocks: device clocks
   See ../clocks/clock-bindings.txt for details.
 - clock-names: the following clocks are required:
-  * "core_clk"
-  * "iface_clk"
-  * "mem_iface_clk"
-- qcom,chipid: gpu chip-id.  Note this may become optional for future
-  devices if we can reliably read the chipid from hw
-- qcom,gpu-pwrlevels: list of operating points
-  - compatible: "qcom,gpu-pwrlevels"
-  - for each qcom,gpu-pwrlevel:
-    - qcom,gpu-freq: requested gpu clock speed
-    - NOTE: downstream android driver defines additional parameters to
-      configure memory bandwidth scaling per OPP.
+  * "core"
+  * "iface"
+  * "mem_iface"
 
 Example:
 
@@ -25,28 +21,18 @@ Example:
 	...
 
 	gpu: qcom,kgsl-3d0@4300000 {
-		compatible = "qcom,adreno-3xx";
+		compatible = "qcom,adreno-320.2", "qcom,adreno";
 		reg = <0x04300000 0x20000>;
 		reg-names = "kgsl_3d0_reg_memory";
 		interrupts = <GIC_SPI 80 0>;
 		interrupt-names = "kgsl_3d0_irq";
 		clock-names =
-		    "core_clk",
-		    "iface_clk",
-		    "mem_iface_clk";
+		    "core",
+		    "iface",
+		    "mem_iface";
 		clocks =
 		    <&mmcc GFX3D_CLK>,
 		    <&mmcc GFX3D_AHB_CLK>,
 		    <&mmcc MMSS_IMEM_AHB_CLK>;
-		qcom,chipid = <0x03020100>;
-		qcom,gpu-pwrlevels {
-			compatible = "qcom,gpu-pwrlevels";
-			qcom,gpu-pwrlevel@0 {
-				qcom,gpu-freq = <450000000>;
-			};
-			qcom,gpu-pwrlevel@1 {
-				qcom,gpu-freq = <27000000>;
-			};
-		};
 	};
 };
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 91d82f39fbf4..f5760b140f13 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -291,10 +291,17 @@ To use :c:func:`drm_gem_mmap()`, drivers must fill the struct
 :c:type:`struct drm_driver <drm_driver>` gem_vm_ops field
 with a pointer to VM operations.
 
-struct vm_operations_struct \*gem_vm_ops struct
-vm_operations_struct { void (\*open)(struct vm_area_struct \* area);
-void (\*close)(struct vm_area_struct \* area); int (\*fault)(struct
-vm_area_struct \*vma, struct vm_fault \*vmf); };
+The VM operations structure is a :c:type:`struct vm_operations_struct <vm_operations_struct>`
+made up of several fields, the more interesting ones being:
+
+.. code-block:: c
+
+	struct vm_operations_struct {
+		void (*open)(struct vm_area_struct * area);
+		void (*close)(struct vm_area_struct * area);
+		int (*fault)(struct vm_fault *vmf);
+	};
+
 
 The open and close operations must update the GEM object reference
 count. Drivers can use the :c:func:`drm_gem_vm_open()` and
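
A minimal sketch of the pattern described in this hunk, assuming the usual
drm_gem_vm_open()/drm_gem_vm_close() helpers; my_gem_fault and my_gem_vm_ops
are illustrative names, not part of this patch:

	static int my_gem_fault(struct vm_fault *vmf)
	{
		struct drm_gem_object *obj = vmf->vma->vm_private_data;

		/* Resolve vmf->address to a page of the object's backing
		 * storage and insert it into the faulting VMA here.
		 */
		return VM_FAULT_NOPAGE;
	}

	static const struct vm_operations_struct my_gem_vm_ops = {
		.open  = drm_gem_vm_open,	/* takes a GEM object reference */
		.close = drm_gem_vm_close,	/* drops that reference */
		.fault = my_gem_fault,
	};
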
diff --git a/MAINTAINERS b/MAINTAINERS
index e4d5ff62f2ec..adfc60d9c456 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3970,6 +3970,7 @@ S:	Maintained
 L:	linux-media@vger.kernel.org
 L:	dri-devel@lists.freedesktop.org
 F:	drivers/dma-buf/sync_*
+F:	drivers/dma-buf/dma-fence*
 F:	drivers/dma-buf/sw_sync.c
 F:	include/linux/sync_file.h
 F:	include/uapi/linux/sync_file.h
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 94a64e3bc682..96ad79627dbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1709,6 +1709,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
 		       struct amdgpu_ring **out_ring);
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index a5df1ef306d9..fa34dcae392f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -850,16 +850,37 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 				strcpy(fw_name, "amdgpu/fiji_smc.bin");
 				break;
 			case CHIP_POLARIS11:
-				if (type == CGS_UCODE_ID_SMU)
-					strcpy(fw_name, "amdgpu/polaris11_smc.bin");
-				else if (type == CGS_UCODE_ID_SMU_SK)
+				if (type == CGS_UCODE_ID_SMU) {
+					if (((adev->pdev->device == 0x67ef) &&
+					     ((adev->pdev->revision == 0xe0) ||
+					      (adev->pdev->revision == 0xe2) ||
+					      (adev->pdev->revision == 0xe5))) ||
+					    ((adev->pdev->device == 0x67ff) &&
+					     ((adev->pdev->revision == 0xcf) ||
+					      (adev->pdev->revision == 0xef) ||
+					      (adev->pdev->revision == 0xff))))
+						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+					else
+						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+				} else if (type == CGS_UCODE_ID_SMU_SK) {
 					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+				}
 				break;
 			case CHIP_POLARIS10:
-				if (type == CGS_UCODE_ID_SMU)
-					strcpy(fw_name, "amdgpu/polaris10_smc.bin");
-				else if (type == CGS_UCODE_ID_SMU_SK)
+				if (type == CGS_UCODE_ID_SMU) {
+					if ((adev->pdev->device == 0x67df) &&
+					    ((adev->pdev->revision == 0xe0) ||
+					     (adev->pdev->revision == 0xe3) ||
+					     (adev->pdev->revision == 0xe4) ||
+					     (adev->pdev->revision == 0xe5) ||
+					     (adev->pdev->revision == 0xe7) ||
+					     (adev->pdev->revision == 0xef)))
+						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+					else
+						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+				} else if (type == CGS_UCODE_ID_SMU_SK) {
 					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+				}
 				break;
 			case CHIP_POLARIS12:
 				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index cf2e8c4e9b8b..57301f5936fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -344,8 +344,7 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
  * submission. This can result in a debt that can stop buffer migrations
  * temporarily.
  */
-static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
-					 u64 num_bytes)
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
 {
 	spin_lock(&adev->mm_stats.lock);
 	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
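
With amdgpu_cs_report_moved_bytes() no longer static, a caller outside the
CS path can charge migrations it triggers against the same budget by sampling
the global moved-bytes counter around the operation. A sketch of the pattern
(error handling elided), which the amdgpu_object.c hunk below applies around
ttm_bo_init():

	u64 before = atomic64_read(&adev->num_bytes_moved);

	r = ttm_bo_init(...);	/* may move buffers internally */

	amdgpu_cs_report_moved_bytes(adev,
		atomic64_read(&adev->num_bytes_moved) - before);
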
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 838943d0962e..36ce3cac81ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -374,7 +374,6 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
 			&amdgpu_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
-				 adev->mode_info.num_crtc,
 				 AMDGPUFB_CONN_LIMIT);
 	if (ret) {
 		kfree(rfbdev);
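
The drm_fb_helper_init() call above loses its CRTC-count argument: the helper
now derives the number of CRTCs from the device itself, so callers pass only
the device, the helper, and the connector limit. A before/after sketch, where
MAX_CONN is an illustrative placeholder:

	/* before */
	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONN);

	/* after */
	ret = drm_fb_helper_init(dev, helper, MAX_CONN);
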
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 9bd1b4eae32e..ec7037a48b6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -487,67 +487,50 @@ static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
  *
  * @adev: amdgpu_device pointer
  * @bo_va: bo_va to update
+ * @list: validation list
+ * @operation: map or unmap
  *
- * Update the bo_va directly after setting it's address. Errors are not
+ * Update the bo_va directly after setting its address. Errors are not
  * vital here, so they are not reported back to userspace.
  */
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 				    struct amdgpu_bo_va *bo_va,
+				    struct list_head *list,
 				    uint32_t operation)
 {
-	struct ttm_validate_buffer tv, *entry;
-	struct amdgpu_bo_list_entry vm_pd;
-	struct ww_acquire_ctx ticket;
-	struct list_head list, duplicates;
-	int r;
-
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
-
-	tv.bo = &bo_va->bo->tbo;
-	tv.shared = true;
-	list_add(&tv.head, &list);
+	struct ttm_validate_buffer *entry;
+	int r = -ERESTARTSYS;
 
-	amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);
-
-	/* Provide duplicates to avoid -EALREADY */
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
-	if (r)
-		goto error_print;
-
-	list_for_each_entry(entry, &list, head) {
+	list_for_each_entry(entry, list, head) {
 		struct amdgpu_bo *bo =
 			container_of(entry->bo, struct amdgpu_bo, tbo);
 
 		/* if anything is swapped out don't swap it in here,
 		   just abort and wait for the next CS */
 		if (!amdgpu_bo_gpu_accessible(bo))
-			goto error_unreserve;
+			goto error;
 
 		if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
-			goto error_unreserve;
+			goto error;
 	}
 
 	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
 				      NULL);
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 	if (operation == AMDGPU_VA_OP_MAP)
 		r = amdgpu_vm_bo_update(adev, bo_va, false);
 
-error_unreserve:
-	ttm_eu_backoff_reservation(&ticket, &list);
-
-error_print:
+error:
 	if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
@@ -564,7 +547,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_bo_list_entry vm_pd;
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
-	struct list_head list, duplicates;
+	struct list_head list;
 	uint32_t invalid_flags, va_flags = 0;
 	int r = 0;
 
@@ -602,14 +585,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	abo = gem_to_amdgpu_bo(gobj);
 	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
 	tv.bo = &abo->tbo;
-	tv.shared = true;
+	tv.shared = false;
 	list_add(&tv.head, &list);
 
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 	if (r) {
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
@@ -640,10 +622,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	default:
 		break;
 	}
-	ttm_eu_backoff_reservation(&ticket, &list);
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
 	    !amdgpu_vm_debug)
-		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
+		amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation);
+	ttm_eu_backoff_reservation(&ticket, &list);
 
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index e4eb6dd3798a..0335c2f331e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -97,8 +97,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
 	struct drm_mm_node *node = mem->mm_node;
-	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
-	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	enum drm_mm_insert_mode mode;
 	unsigned long fpfn, lpfn;
 	int r;
 
@@ -115,15 +114,14 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 	else
 		lpfn = man->size;
 
-	if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
-		sflags = DRM_MM_SEARCH_BELOW;
-		aflags = DRM_MM_CREATE_TOP;
-	}
+	mode = DRM_MM_INSERT_BEST;
+	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
+		mode = DRM_MM_INSERT_HIGH;
 
 	spin_lock(&mgr->lock);
-	r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
-						mem->page_alignment, 0,
-						fpfn, lpfn, sflags, aflags);
+	r = drm_mm_insert_node_in_range(&mgr->mm, node,
+					mem->num_pages, mem->page_alignment, 0,
+					fpfn, lpfn, mode);
 	spin_unlock(&mgr->lock);
 
 	if (!r) {
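
The conversion above collapses the old search/allocator flag pair into a
single enum drm_mm_insert_mode argument to drm_mm_insert_node_in_range()
(DRM_MM_INSERT_BEST, _LOW, _HIGH, _EVICT). A sketch of the new call pattern,
with illustrative variable names:

	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;	/* allocate top-down */

	r = drm_mm_insert_node_in_range(mm, node, num_pages, alignment,
					0 /* color */, fpfn, lpfn, mode);
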
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index d1aa291b2638..be80a4a68d7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -323,6 +323,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	struct amdgpu_bo *bo;
 	enum ttm_bo_type type;
 	unsigned long page_align;
+	u64 initial_bytes_moved;
 	size_t acc_size;
 	int r;
 
@@ -374,8 +375,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
 	 */
 
+#ifndef CONFIG_COMPILE_TEST
 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
 	 thanks to write-combining
+#endif
 
 	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
@@ -399,12 +402,20 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 		locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
 		WARN_ON(!locked);
 	}
+
+	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, page_align, !kernel, NULL,
 			acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
 			&amdgpu_ttm_bo_destroy);
-	if (unlikely(r != 0))
+	amdgpu_cs_report_moved_bytes(adev,
+		atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);
+
+	if (unlikely(r != 0)) {
+		if (!resv)
+			ww_mutex_unlock(&bo->tbo.resv->lock);
 		return r;
+	}
 
 	bo->tbo.priority = ilog2(bo->tbo.num_pages);
 	if (kernel)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index a61882ddc804..95e026a4a2de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1142,12 +1142,22 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 			/* XXX select vce level based on ring/task */
 			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
 			mutex_unlock(&adev->pm.mutex);
+			amdgpu_pm_compute_clocks(adev);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_PG_STATE_UNGATE);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_CG_STATE_UNGATE);
 		} else {
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_PG_STATE_GATE);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_CG_STATE_GATE);
 			mutex_lock(&adev->pm.mutex);
 			adev->pm.dpm.vce_active = false;
 			mutex_unlock(&adev->pm.mutex);
+			amdgpu_pm_compute_clocks(adev);
 		}
-		amdgpu_pm_compute_clocks(adev);
+
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 6f62ac473064..6d6ab7f11b4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1113,6 +1113,11 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 			amdgpu_dpm_enable_uvd(adev, false);
 		} else {
 			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+			/* shutdown the UVD block */
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							    AMD_PG_STATE_GATE);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							    AMD_CG_STATE_GATE);
 		}
 	} else {
 		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
@@ -1129,6 +1134,10 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 			amdgpu_dpm_enable_uvd(adev, true);
 		} else {
 			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							    AMD_CG_STATE_UNGATE);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							    AMD_PG_STATE_UNGATE);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 79bc9c7aad45..e2c06780ce49 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -321,6 +321,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
 			amdgpu_dpm_enable_vce(adev, false);
 		} else {
 			amdgpu_asic_set_vce_clocks(adev, 0, 0);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							    AMD_PG_STATE_GATE);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							    AMD_CG_STATE_GATE);
 		}
 	} else {
 		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
@@ -346,6 +350,11 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
 			amdgpu_dpm_enable_vce(adev, true);
 		} else {
 			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							    AMD_CG_STATE_UNGATE);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							    AMD_PG_STATE_UNGATE);
+
 		}
 	}
 	mutex_unlock(&adev->vce.idle_mutex);
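
Taken together, the UVD and VCE begin_use/idle hunks above establish a
symmetric ordering: ungate clocks before powering a block up for work, and
power-gate the block before re-gating its clocks once idle. In outline (a
sketch of the ordering only, not the literal driver code):

	/* ring begin_use */
	amdgpu_set_clockgating_state(adev, block, AMD_CG_STATE_UNGATE);
	amdgpu_set_powergating_state(adev, block, AMD_PG_STATE_UNGATE);

	/* idle work handler */
	amdgpu_set_powergating_state(adev, block, AMD_PG_STATE_GATE);
	amdgpu_set_clockgating_state(adev, block, AMD_CG_STATE_GATE);
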
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 3fd951c71d1b..dcfb7df3caf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -83,7 +83,6 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
 		amdgpu_vm_bo_rmv(adev, bo_va);
 		ttm_eu_backoff_reservation(&ticket, &list);
-		kfree(bo_va);
 		return r;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index ac9007986c11..9e577e3d3147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -97,8 +97,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	struct amdgpu_vram_mgr *mgr = man->priv;
 	struct drm_mm *mm = &mgr->mm;
 	struct drm_mm_node *nodes;
-	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
-	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	enum drm_mm_insert_mode mode;
 	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
 	unsigned i;
 	int r;
@@ -121,10 +120,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	if (!nodes)
 		return -ENOMEM;
 
-	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
-		sflags = DRM_MM_SEARCH_BELOW;
-		aflags = DRM_MM_CREATE_TOP;
-	}
+	mode = DRM_MM_INSERT_BEST;
+	if (place->flags & TTM_PL_FLAG_TOPDOWN)
+		mode = DRM_MM_INSERT_HIGH;
 
 	pages_left = mem->num_pages;
 
@@ -135,13 +133,11 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 
 		if (pages == pages_per_node)
 			alignment = pages_per_node;
-		else
-			sflags |= DRM_MM_SEARCH_BEST;
 
-		r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
-							alignment, 0,
-							place->fpfn, lpfn,
-							sflags, aflags);
+		r = drm_mm_insert_node_in_range(mm, &nodes[i],
+						pages, alignment, 0,
+						place->fpfn, lpfn,
+						mode);
 		if (unlikely(r))
 			goto error;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 9498e78b90d7..f97ecb49972e 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -2210,7 +2210,6 @@ static void ci_clear_vc(struct amdgpu_device *adev)
 
 static int ci_upload_firmware(struct amdgpu_device *adev)
 {
-	struct ci_power_info *pi = ci_get_pi(adev);
 	int i, ret;
 
 	if (amdgpu_ci_is_smc_running(adev)) {
@@ -2227,7 +2226,7 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
 	amdgpu_ci_stop_smc_clock(adev);
 	amdgpu_ci_reset_smc(adev);
 
-	ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
+	ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
 
 	return ret;
 
@@ -4257,12 +4256,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
 
 	if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
 		if (amdgpu_new_state->evclk) {
-			/* turn the clocks on when encoding */
-			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-							    AMD_CG_STATE_UNGATE);
-			if (ret)
-				return ret;
-
 			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
 			tmp = RREG32_SMC(ixDPM_TABLE_475);
 			tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
@@ -4274,9 +4267,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
 			ret = ci_enable_vce_dpm(adev, false);
 			if (ret)
 				return ret;
-			/* turn the clocks off when not encoding */
-			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-							    AMD_CG_STATE_GATE);
 		}
 	}
 	return ret;
@@ -6278,13 +6268,13 @@ static int ci_dpm_sw_init(void *handle)
 	adev->pm.current_mclk = adev->clock.default_mclk;
 	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
 
-	if (amdgpu_dpm == 0)
-		return 0;
-
 	ret = ci_dpm_init_microcode(adev);
 	if (ret)
 		return ret;
 
+	if (amdgpu_dpm == 0)
+		return 0;
+
 	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
 	mutex_lock(&adev->pm.mutex);
 	ret = ci_dpm_init(adev);
@@ -6328,8 +6318,15 @@ static int ci_dpm_hw_init(void *handle)
 
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!amdgpu_dpm)
+	if (!amdgpu_dpm) {
+		ret = ci_upload_firmware(adev);
+		if (ret) {
+			DRM_ERROR("ci_upload_firmware failed\n");
+			return ret;
+		}
+		ci_dpm_start_smc(adev);
 		return 0;
+	}
 
 	mutex_lock(&adev->pm.mutex);
 	ci_dpm_setup_asic(adev);
@@ -6351,6 +6348,8 @@ static int ci_dpm_hw_fini(void *handle)
 		mutex_lock(&adev->pm.mutex);
 		ci_dpm_disable(adev);
 		mutex_unlock(&adev->pm.mutex);
+	} else {
+		ci_dpm_stop_smc(adev);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 7da688b0d27d..7c39b538dc0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1722,8 +1722,8 @@ static int cik_common_early_init(void *handle)
 			  AMD_PG_SUPPORT_GFX_SMG |
 			  AMD_PG_SUPPORT_GFX_DMG |*/
 			AMD_PG_SUPPORT_UVD |
-			/*AMD_PG_SUPPORT_VCE |
-			  AMD_PG_SUPPORT_CP |
+			AMD_PG_SUPPORT_VCE |
+			/*  AMD_PG_SUPPORT_CP |
 			  AMD_PG_SUPPORT_GDS |
 			  AMD_PG_SUPPORT_RLC_SMU_HS |
 			  AMD_PG_SUPPORT_ACP |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index c998f6aaaf36..2086e7e68de4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1325,21 +1325,19 @@ static u32 gfx_v6_0_create_bitmask(u32 bit_width)
 	return (u32)(((u64)1 << bit_width) - 1);
 }
 
-static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
-				    u32 max_rb_num_per_se,
-				    u32 sh_per_se)
+static u32 gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev)
 {
 	u32 data, mask;
 
-	data = RREG32(mmCC_RB_BACKEND_DISABLE);
-	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
-	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+	data = RREG32(mmCC_RB_BACKEND_DISABLE) |
+		RREG32(mmGC_USER_RB_BACKEND_DISABLE);
 
-	data >>= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+	data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
 
-	mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+	mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_backends_per_se/
+					adev->gfx.config.max_sh_per_se);
 
-	return data & mask;
+	return ~data & mask;
 }
 
 static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
@@ -1468,68 +1466,55 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
 	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 }
 
-static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
-			      u32 se_num, u32 sh_per_se,
-			      u32 max_rb_num_per_se)
+static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
 {
 	int i, j;
-	u32 data, mask;
-	u32 disabled_rbs = 0;
-	u32 enabled_rbs = 0;
+	u32 data;
+	u32 raster_config = 0;
+	u32 active_rbs = 0;
+	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+					adev->gfx.config.max_sh_per_se;
 	unsigned num_rb_pipes;
 
 	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < se_num; i++) {
-		for (j = 0; j < sh_per_se; j++) {
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
-			data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
-			disabled_rbs |= data << ((i * sh_per_se + j) * 2);
+			data = gfx_v6_0_get_rb_active_bitmap(adev);
+			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+					rb_bitmap_width_per_sh);
 		}
 	}
 	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
-
-	mask = 1;
-	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
-		if (!(disabled_rbs & mask))
-			enabled_rbs |= mask;
-		mask <<= 1;
-	}
 
-	adev->gfx.config.backend_enable_mask = enabled_rbs;
-	adev->gfx.config.num_rbs = hweight32(enabled_rbs);
+	adev->gfx.config.backend_enable_mask = active_rbs;
+	adev->gfx.config.num_rbs = hweight32(active_rbs);
 
 	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
 			     adev->gfx.config.max_shader_engines, 16);
 
-	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < se_num; i++) {
-		gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
-		data = 0;
-		for (j = 0; j < sh_per_se; j++) {
-			switch (enabled_rbs & 3) {
-			case 1:
-				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
-				break;
-			case 2:
-				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
-				break;
-			case 3:
-			default:
-				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
-				break;
-			}
-			enabled_rbs >>= 2;
-		}
-		gfx_v6_0_raster_config(adev, &data);
+	gfx_v6_0_raster_config(adev, &raster_config);
 
-		if (!adev->gfx.config.backend_enable_mask ||
-				adev->gfx.config.num_rbs >= num_rb_pipes)
-			WREG32(mmPA_SC_RASTER_CONFIG, data);
-		else
-			gfx_v6_0_write_harvested_raster_configs(adev, data,
-								adev->gfx.config.backend_enable_mask,
-								num_rb_pipes);
+	if (!adev->gfx.config.backend_enable_mask ||
+			adev->gfx.config.num_rbs >= num_rb_pipes) {
+		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+	} else {
+		gfx_v6_0_write_harvested_raster_configs(adev, raster_config,
+							adev->gfx.config.backend_enable_mask,
+							num_rb_pipes);
+	}
+
+	/* cache the values for userspace */
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+			adev->gfx.config.rb_config[i][j].rb_backend_disable =
+				RREG32(mmCC_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].raster_config =
+				RREG32(mmPA_SC_RASTER_CONFIG);
+		}
 	}
 	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 	mutex_unlock(&adev->grbm_idx_mutex);
@@ -1540,36 +1525,44 @@ static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev)
 }
 */
 
-static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
+static void gfx_v6_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+						 u32 bitmap)
 {
-	u32 data, mask;
+	u32 data;
 
-	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
-	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
-	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
+	if (!bitmap)
+		return;
 
-	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
 
-	mask = gfx_v6_0_create_bitmask(cu_per_sh);
+	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
 
-	return ~data & mask;
+static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev)
+{
+	u32 data, mask;
+
+	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
+		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
+
+	mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
+	return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
 }
 
 
-static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
-			 u32 se_num, u32 sh_per_se,
-			 u32 cu_per_sh)
+static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
 {
 	int i, j, k;
 	u32 data, mask;
 	u32 active_cu = 0;
 
 	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < se_num; i++) {
-		for (j = 0; j < sh_per_se; j++) {
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
 			data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
-			active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
+			active_cu = gfx_v6_0_get_cu_enabled(adev);
 
 			mask = 1;
 			for (k = 0; k < 16; k++) {
@@ -1717,6 +1710,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
 		gb_addr_config |= 2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
 		break;
 	}
+	gb_addr_config &= ~GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK;
+	if (adev->gfx.config.max_shader_engines == 2)
+		gb_addr_config |= 1 << GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT;
 	adev->gfx.config.gb_addr_config = gb_addr_config;
 
 	WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
@@ -1735,13 +1731,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
 #endif
 	gfx_v6_0_tiling_mode_table_init(adev);
 
-	gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
-		    adev->gfx.config.max_sh_per_se,
-		    adev->gfx.config.max_backends_per_se);
+	gfx_v6_0_setup_rb(adev);
 
-	gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines,
-		     adev->gfx.config.max_sh_per_se,
-		     adev->gfx.config.max_cu_per_sh);
+	gfx_v6_0_setup_spi(adev);
 
 	gfx_v6_0_get_cu_info(adev);
 
@@ -2941,61 +2933,16 @@ static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
 	}
 }
 
-static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
-					 u32 se, u32 sh)
-{
-
-	u32 mask = 0, tmp, tmp1;
-	int i;
-
-	mutex_lock(&adev->grbm_idx_mutex);
-	gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
-	tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
-	tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
-	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
-
-	tmp &= 0xffff0000;
-
-	tmp |= tmp1;
-	tmp >>= 16;
-
-	for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) {
-		mask <<= 1;
-		mask |= 1;
-	}
-
-	return (~tmp) & mask;
-}
-
 static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
 {
-	u32 i, j, k, active_cu_number = 0;
-
-	u32 mask, counter, cu_bitmap;
-	u32 tmp = 0;
-
-	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
-		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-			mask = 1;
-			cu_bitmap = 0;
-			counter  = 0;
-			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
-				if (gfx_v6_0_get_cu_active_bitmap(adev, i, j) & mask) {
-					if (counter < 2)
-						cu_bitmap |= mask;
-					counter++;
-				}
-				mask <<= 1;
-			}
+	u32 tmp;
 
-			active_cu_number += counter;
-			tmp |= (cu_bitmap << (i * 16 + j * 8));
-		}
-	}
+	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
 
-	WREG32(mmRLC_PG_AO_CU_MASK, tmp);
-	WREG32_FIELD(RLC_MAX_PG_CU, MAX_POWERED_UP_CU, active_cu_number);
+	tmp = RREG32(mmRLC_MAX_PG_CU);
+	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
+	tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
+	WREG32(mmRLC_MAX_PG_CU, tmp);
 }
 
 static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
@@ -3770,18 +3717,26 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
 	int i, j, k, counter, active_cu_number = 0;
 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+	unsigned disable_masks[4 * 2];
 
 	memset(cu_info, 0, sizeof(*cu_info));
 
+	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
+	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			mask = 1;
 			ao_bitmap = 0;
 			counter = 0;
-			bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
+			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+			if (i < 4 && j < 2)
+				gfx_v6_0_set_user_cu_inactive_bitmap(
+					adev, disable_masks[i * 2 + j]);
+			bitmap = gfx_v6_0_get_cu_enabled(adev);
 			cu_info->bitmap[i][j] = bitmap;
 
-			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+			for (k = 0; k < 16; k++) {
 				if (bitmap & mask) {
 					if (counter < 2)
 						ao_bitmap |= mask;
@@ -3794,6 +3749,9 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
 		}
 	}
 
+	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 8785ca570729..f5a343cb0010 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -1550,11 +1550,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
 
 	if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
 		kv_dpm_powergate_vce(adev, false);
-		/* turn the clocks on when encoding */
-		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-						    AMD_CG_STATE_UNGATE);
-		if (ret)
-			return ret;
 		if (pi->caps_stable_p_state)
 			pi->vce_boot_level = table->count - 1;
 		else
@@ -1573,15 +1568,9 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
 			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
 							  PPSMC_MSG_VCEDPM_SetEnabledMask,
 							  (1 << pi->vce_boot_level));
-
 		kv_enable_vce_dpm(adev, true);
 	} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
 		kv_enable_vce_dpm(adev, false);
-		/* turn the clocks off when not encoding */
-		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-						    AMD_CG_STATE_GATE);
-		if (ret)
-			return ret;
 		kv_dpm_powergate_vce(adev, true);
 	}
 
@@ -1688,70 +1677,44 @@ static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
 	struct kv_power_info *pi = kv_get_pi(adev);
 	int ret;
 
-	if (pi->uvd_power_gated == gate)
-		return;
-
 	pi->uvd_power_gated = gate;
 
 	if (gate) {
-		if (pi->caps_uvd_pg) {
-			/* disable clockgating so we can properly shut down the block */
-			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-							    AMD_CG_STATE_UNGATE);
-			/* shutdown the UVD block */
-			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-							    AMD_PG_STATE_GATE);
-			/* XXX: check for errors */
-		}
+		/* stop the UVD block */
+		ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							AMD_PG_STATE_GATE);
 		kv_update_uvd_dpm(adev, gate);
 		if (pi->caps_uvd_pg)
 			/* power off the UVD block */
 			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
 	} else {
-		if (pi->caps_uvd_pg) {
+		if (pi->caps_uvd_pg)
 			/* power on the UVD block */
 			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
 			/* re-init the UVD block */
-			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-							    AMD_PG_STATE_UNGATE);
-			/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
-			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-							    AMD_CG_STATE_GATE);
-			/* XXX: check for errors */
-		}
 		kv_update_uvd_dpm(adev, gate);
+
+		ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							AMD_PG_STATE_UNGATE);
 	}
 }
 
 static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
 {
 	struct kv_power_info *pi = kv_get_pi(adev);
-	int ret;
 
 	if (pi->vce_power_gated == gate)
 		return;
 
 	pi->vce_power_gated = gate;
 
-	if (gate) {
-		if (pi->caps_vce_pg) {
-			/* shutdown the VCE block */
-			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-							    AMD_PG_STATE_GATE);
-			/* XXX: check for errors */
-			/* power off the VCE block */
-			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
-		}
-	} else {
-		if (pi->caps_vce_pg) {
-			/* power on the VCE block */
-			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
-			/* re-init the VCE block */
-			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-							    AMD_PG_STATE_UNGATE);
-			/* XXX: check for errors */
-		}
-	}
+	if (!pi->caps_vce_pg)
+		return;
+
+	if (gate)
+		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+	else
+		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
 }
 
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
@@ -3009,8 +2972,7 @@ static int kv_dpm_late_init(void *handle)
 
 	kv_dpm_powergate_acp(adev, true);
 	kv_dpm_powergate_samu(adev, true);
-	kv_dpm_powergate_vce(adev, true);
-	kv_dpm_powergate_uvd(adev, true);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index da46992f7b18..b71e3faa40db 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1010,24 +1010,81 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
 	{PA_SC_RASTER_CONFIG, false, true},
 };
 
-static uint32_t si_read_indexed_register(struct amdgpu_device *adev,
-					  u32 se_num, u32 sh_num,
-					  u32 reg_offset)
+static uint32_t si_get_register_value(struct amdgpu_device *adev,
+				      bool indexed, u32 se_num,
+				      u32 sh_num, u32 reg_offset)
 {
-	uint32_t val;
+	if (indexed) {
+		uint32_t val;
+		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+		switch (reg_offset) {
+		case mmCC_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+		case mmGC_USER_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+		case mmPA_SC_RASTER_CONFIG:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+		}
 
-	mutex_lock(&adev->grbm_idx_mutex);
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+		mutex_lock(&adev->grbm_idx_mutex);
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
-	val = RREG32(reg_offset);
+		val = RREG32(reg_offset);
 
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
-	return val;
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+		return val;
+	} else {
+		unsigned idx;
+
+		switch (reg_offset) {
+		case mmGB_ADDR_CONFIG:
+			return adev->gfx.config.gb_addr_config;
+		case mmMC_ARB_RAMCFG:
+			return adev->gfx.config.mc_arb_ramcfg;
+		case mmGB_TILE_MODE0:
+		case mmGB_TILE_MODE1:
+		case mmGB_TILE_MODE2:
+		case mmGB_TILE_MODE3:
+		case mmGB_TILE_MODE4:
+		case mmGB_TILE_MODE5:
+		case mmGB_TILE_MODE6:
+		case mmGB_TILE_MODE7:
+		case mmGB_TILE_MODE8:
+		case mmGB_TILE_MODE9:
+		case mmGB_TILE_MODE10:
+		case mmGB_TILE_MODE11:
+		case mmGB_TILE_MODE12:
+		case mmGB_TILE_MODE13:
+		case mmGB_TILE_MODE14:
+		case mmGB_TILE_MODE15:
+		case mmGB_TILE_MODE16:
+		case mmGB_TILE_MODE17:
+		case mmGB_TILE_MODE18:
+		case mmGB_TILE_MODE19:
+		case mmGB_TILE_MODE20:
+		case mmGB_TILE_MODE21:
+		case mmGB_TILE_MODE22:
+		case mmGB_TILE_MODE23:
+		case mmGB_TILE_MODE24:
+		case mmGB_TILE_MODE25:
+		case mmGB_TILE_MODE26:
+		case mmGB_TILE_MODE27:
+		case mmGB_TILE_MODE28:
+		case mmGB_TILE_MODE29:
+		case mmGB_TILE_MODE30:
+		case mmGB_TILE_MODE31:
+			idx = (reg_offset - mmGB_TILE_MODE0);
+			return adev->gfx.config.tile_mode_array[idx];
+		default:
+			return RREG32(reg_offset);
+		}
+	}
 }
-
 static int si_read_register(struct amdgpu_device *adev, u32 se_num,
 			     u32 sh_num, u32 reg_offset, u32 *value)
 {
@@ -1039,10 +1096,9 @@ static int si_read_register(struct amdgpu_device *adev, u32 se_num,
 			continue;
 
 		if (!si_allowed_read_registers[i].untouched)
-			*value = si_allowed_read_registers[i].grbm_indexed ?
-				 si_read_indexed_register(adev, se_num,
-							   sh_num, reg_offset) :
-				 RREG32(reg_offset);
+			*value = si_get_register_value(adev,
+						si_allowed_read_registers[i].grbm_indexed,
+						se_num, sh_num, reg_offset);
 		return 0;
 	}
 	return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
index fde2086246fa..dc9e0e6b4558 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
@@ -143,8 +143,8 @@
 #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
 
 #define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
-#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
-#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02010001
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x02010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02011003
 
 #define PACKET3(op, n)  ((RADEON_PACKET_TYPE3 << 30) |                  \
                          (((op) & 0xFF) << 8) |                         \
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 7fb9137dd89b..f15df99f0a06 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -159,9 +159,6 @@ static int uvd_v4_2_hw_init(void *handle)
 
 	uvd_v4_2_enable_mgcg(adev, true);
 	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
-	r = uvd_v4_2_start(adev);
-	if (r)
-		goto done;
 
 	ring->ready = true;
 	r = amdgpu_ring_test_ring(ring);
@@ -198,7 +195,6 @@ static int uvd_v4_2_hw_init(void *handle)
 	amdgpu_ring_commit(ring);
 
 done:
-
 	if (!r)
 		DRM_INFO("UVD initialized successfully.\n");
 
@@ -217,7 +213,9 @@ static int uvd_v4_2_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 
-	uvd_v4_2_stop(adev);
+	if (RREG32(mmUVD_STATUS) != 0)
+		uvd_v4_2_stop(adev);
+
 	ring->ready = false;
 
 	return 0;
@@ -267,37 +265,26 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 	uint32_t rb_bufsz;
 	int i, j, r;
+	u32 tmp;
 	/* disable byte swapping */
 	u32 lmi_swap_cntl = 0;
 	u32 mp_swap_cntl = 0;
 
-	WREG32(mmUVD_CGC_GATE, 0);
-	uvd_v4_2_set_dcm(adev, true);
+	/* set uvd busy */
+	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));
 
-	uvd_v4_2_mc_resume(adev);
-
-	/* disable interupt */
-	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
-
-	/* Stall UMC and register bus before resetting VCPU */
-	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
-	mdelay(1);
-
-	/* put LMI, VCPU, RBC etc... into reset */
-	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
-	mdelay(5);
+	uvd_v4_2_set_dcm(adev, true);
+	WREG32(mmUVD_CGC_GATE, 0);
 
 	/* take UVD block out of reset */
 	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
 	mdelay(5);
 
-	/* initialize UVD memory controller */
-	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
-			     (1 << 21) | (1 << 9) | (1 << 20));
+	/* enable VCPU clock */
+	WREG32(mmUVD_VCPU_CNTL,  1 << 9);
+
+	/* disable interrupt */
+	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
 
 #ifdef __BIG_ENDIAN
 	/* swap (8 in 32) RB and IB */
@@ -306,6 +293,11 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
 #endif
 	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
 	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+	/* initialize UVD memory controller */
+	WREG32(mmUVD_LMI_CTRL, 0x203108);
+
+	tmp = RREG32(mmUVD_MPC_CNTL);
+	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);
 
 	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
 	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
@@ -314,18 +306,20 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
 	WREG32(mmUVD_MPC_SET_ALU, 0);
 	WREG32(mmUVD_MPC_SET_MUX, 0x88);
 
-	/* take all subblocks out of reset, except VCPU */
-	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
-	mdelay(5);
+	uvd_v4_2_mc_resume(adev);
 
-	/* enable VCPU clock */
-	WREG32(mmUVD_VCPU_CNTL,  1 << 9);
+	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
+	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));
 
 	/* enable UMC */
 	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
 
-	/* boot up the VCPU */
-	WREG32(mmUVD_SOFT_RESET, 0);
+	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
+
+	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+
+	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+
 	mdelay(10);
 
 	for (i = 0; i < 10; ++i) {
@@ -357,6 +351,8 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
 	/* enable interupt */
 	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));
 
+	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));
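+	/* the busy flag set at the top of this function is cleared again */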
+
 	/* force RBC into idle state */
 	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
 
@@ -393,22 +389,54 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
  */
 static void uvd_v4_2_stop(struct amdgpu_device *adev)
 {
-	/* force RBC into idle state */
+	uint32_t i, j;
+	uint32_t status;
+
 	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
 
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmUVD_STATUS);
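+			/* bit 1 of UVD_STATUS is treated here as the idle flag */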
+			if (status & 2)
+				break;
+			mdelay(1);
+		}
+		break;
+	}
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmUVD_LMI_STATUS);
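+			/* low four bits: LMI masters reporting done, by our reading */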
+			if (status & 0xf)
+				break;
+			mdelay(1);
+		}
+		break;
+	}
+
 	/* Stall UMC and register bus before resetting VCPU */
 	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
-	mdelay(1);
 
-	/* put VCPU into reset */
-	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
-	mdelay(5);
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmUVD_LMI_STATUS);
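+			/* 0x240: presumably the UMC and register-bus stall-acknowledge bits */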
+			if (status & 0x240)
+				break;
+			mdelay(1);
+		}
+		break;
+	}
 
-	/* disable VCPU clock */
-	WREG32(mmUVD_VCPU_CNTL, 0x0);
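+	/* 0x3D49 has no mm* define in this file; it appears to alias
+	 * UVD_STATUS, clearing the busy bit again on the way down
+	 */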
+	WREG32_P(0x3D49, 0, ~(1 << 2));
 
-	/* Unstall UMC and register bus */
-	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));
+
+	/* put LMI, VCPU, RBC etc... into reset */
+	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+
+	WREG32(mmUVD_STATUS, 0);
 
 	uvd_v4_2_set_dcm(adev, false);
 }
@@ -694,8 +722,24 @@ static int uvd_v4_2_set_powergating_state(void *handle,
 
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v4_2_stop(adev);
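+		/* with DPM disabled the driver walks the PG FSM itself;
+		 * CURRENT_PG_STATUS bit 2 is read as "UVD already gated"
+		 */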
+		if ((adev->pg_flags & AMD_PG_SUPPORT_UVD) && amdgpu_dpm == 0) {
+			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) & 0x4)) {
+				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
+							UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
+							UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
+				mdelay(20);
+			}
+		}
 		return 0;
 	} else {
+		if ((adev->pg_flags & AMD_PG_SUPPORT_UVD) && amdgpu_dpm == 0) {
+			if (RREG32_SMC(ixCURRENT_PG_STATUS) & 0x4) {
+				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
+						UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
+						UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
+				mdelay(30);
+			}
+		}
 		return uvd_v4_2_start(adev);
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 9b49824233ae..46e715193924 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -152,9 +152,9 @@ static int uvd_v5_0_hw_init(void *handle)
 	uint32_t tmp;
 	int r;
 
-	r = uvd_v5_0_start(adev);
-	if (r)
-		goto done;
+	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+	uvd_v5_0_enable_mgcg(adev, true);
 
 	ring->ready = true;
 	r = amdgpu_ring_test_ring(ring);
@@ -189,11 +189,13 @@ static int uvd_v5_0_hw_init(void *handle)
 	amdgpu_ring_write(ring, 3);
 
 	amdgpu_ring_commit(ring);
+
 done:
 	if (!r)
 		DRM_INFO("UVD initialized successfully.\n");
 
 	return r;
 }
 
 /**
@@ -208,7 +210,9 @@ static int uvd_v5_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 
-	uvd_v5_0_stop(adev);
+	if (RREG32(mmUVD_STATUS) != 0)
+		uvd_v5_0_stop(adev);
+
 	ring->ready = false;
 
 	return 0;
@@ -310,10 +314,6 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)
 
 	uvd_v5_0_mc_resume(adev);
 
-	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
-	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
-	uvd_v5_0_enable_mgcg(adev, true);
-
 	/* disable interupt */
 	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
 
@@ -456,6 +456,8 @@ static void uvd_v5_0_stop(struct amdgpu_device *adev)
 
 	/* Unstall UMC and register bus */
 	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+	WREG32(mmUVD_STATUS, 0);
 }
 
 /**
@@ -792,9 +794,6 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 
-	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-		return 0;
-
 	if (enable) {
 		/* wait for STATUS to clear */
 		if (uvd_v5_0_wait_for_idle(handle))
@@ -824,9 +823,6 @@ static int uvd_v5_0_set_powergating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret = 0;
 
-	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
-		return 0;
-
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v5_0_stop(adev);
 		adev->uvd.is_powergated = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index de7e03544d00..af83ab8c1250 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -155,9 +155,9 @@ static int uvd_v6_0_hw_init(void *handle)
 	uint32_t tmp;
 	int r;
 
-	r = uvd_v6_0_start(adev);
-	if (r)
-		goto done;
+	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+	uvd_v6_0_enable_mgcg(adev, true);
 
 	ring->ready = true;
 	r = amdgpu_ring_test_ring(ring);
@@ -212,7 +212,9 @@ static int uvd_v6_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 
-	uvd_v6_0_stop(adev);
+	if (RREG32(mmUVD_STATUS) != 0)
+		uvd_v6_0_stop(adev);
+
 	ring->ready = false;
 
 	return 0;
@@ -397,9 +399,6 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
 	lmi_swap_cntl = 0;
 	mp_swap_cntl = 0;
 
-	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
-	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
-	uvd_v6_0_enable_mgcg(adev, true);
 	uvd_v6_0_mc_resume(adev);
 
 	/* disable interrupt */
@@ -554,6 +553,8 @@ static void uvd_v6_0_stop(struct amdgpu_device *adev)
 
 	/* Unstall UMC and register bus */
 	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+	WREG32(mmUVD_STATUS, 0);
 }
 
 /**
@@ -1018,9 +1019,6 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 
-	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-		return 0;
-
 	if (enable) {
 		/* wait for STATUS to clear */
 		if (uvd_v6_0_wait_for_idle(handle))
@@ -1049,9 +1047,6 @@ static int uvd_v6_0_set_powergating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret = 0;
 
-	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
-		return 0;
-
 	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
 
 	if (state == AMD_PG_STATE_GATE) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 38ed903dd6f8..9ea99348e493 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -42,10 +42,9 @@
 #define VCE_V2_0_DATA_SIZE	(23552 * AMDGPU_MAX_VCE_HANDLES)
 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
 
-static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vce_v2_0_wait_for_idle(void *handle);
+
 /**
  * vce_v2_0_ring_get_rptr - get read pointer
  *
@@ -140,6 +139,86 @@ static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
 	return -ETIMEDOUT;
 }
 
+static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
+{
+	WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
+}
+
+static void vce_v2_0_init_cg(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	tmp = RREG32(mmVCE_CLOCK_GATING_A);
+	tmp &= ~0xfff;
+	tmp |= ((0 << 0) | (4 << 4));
+	tmp |= 0x40000;
+	WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+	tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+	tmp &= ~0xfff;
+	tmp |= ((0 << 0) | (4 << 4));
+	WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+	tmp = RREG32(mmVCE_CLOCK_GATING_B);
+	tmp |= 0x10;
+	tmp &= ~0x100000;
+	WREG32(mmVCE_CLOCK_GATING_B, tmp);
+}
+
+static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
+{
+	uint64_t addr = adev->vce.gpu_addr;
+	uint32_t size;
+
+	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
+	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
+
+	WREG32(mmVCE_LMI_CTRL, 0x00398000);
+	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
+	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
+	WREG32(mmVCE_LMI_VM_CTRL, 0);
+
+	addr += AMDGPU_VCE_FIRMWARE_OFFSET;
+	size = VCE_V2_0_FW_SIZE;
+	WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
+	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
+
+	addr += size;
+	size = VCE_V2_0_STACK_SIZE;
+	WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
+	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
+
+	addr += size;
+	size = VCE_V2_0_DATA_SIZE;
+	WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
+	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
+
+	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
+	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
+}
+
+static bool vce_v2_0_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+}
+
+static int vce_v2_0_wait_for_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	unsigned i;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (vce_v2_0_is_idle(handle))
+			return 0;
+	}
+	return -ETIMEDOUT;
+}
+
 /**
  * vce_v2_0_start - start VCE block
  *
@@ -152,11 +231,14 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring;
 	int r;
 
-	vce_v2_0_mc_resume(adev);
-
 	/* set BUSY flag */
 	WREG32_P(mmVCE_STATUS, 1, ~1);
 
+	vce_v2_0_init_cg(adev);
+	vce_v2_0_disable_cg(adev);
+
+	vce_v2_0_mc_resume(adev);
+
 	ring = &adev->vce.ring[0];
 	WREG32(mmVCE_RB_RPTR, ring->wptr);
 	WREG32(mmVCE_RB_WPTR, ring->wptr);
@@ -189,6 +271,145 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int vce_v2_0_stop(struct amdgpu_device *adev)
+{
+	int i, j;
+	int status;
+
+	if (vce_v2_0_lmi_clean(adev)) {
+		DRM_INFO("vce is not idle \n");
+		return 0;
+	}
+/*
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmVCE_FW_REG_STATUS);
+			if (!(status & 1))
+				break;
+			mdelay(1);
+		}
+		break;
+	}
+*/
+	if (vce_v2_0_wait_for_idle(adev)) {
+		DRM_INFO("VCE is busy, Can't set clock gateing");
+		return 0;
+	}
+
+	/* Stall UMC and register bus before resetting VCPU */
+	WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmVCE_LMI_STATUS);
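+			/* presumably the same 0x240 stall-acknowledge bits as on the UVD side */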
+			if (status & 0x240)
+				break;
+			mdelay(1);
+		}
+		break;
+	}
+
+	WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);
+
+	/* put LMI, VCPU, RBC etc... into reset */
+	WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);
+
+	WREG32(mmVCE_STATUS, 0);
+
+	return 0;
+}
+
+static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
+{
+	u32 tmp;
+
+	if (gated) {
+		tmp = RREG32(mmVCE_CLOCK_GATING_B);
+		tmp |= 0xe70000;
+		WREG32(mmVCE_CLOCK_GATING_B, tmp);
+
+		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+		tmp |= 0xff000000;
+		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+		tmp &= ~0x3fc;
+		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+
+		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+	} else {
+		tmp = RREG32(mmVCE_CLOCK_GATING_B);
+		tmp |= 0xe7;
+		tmp &= ~0xe70000;
+		WREG32(mmVCE_CLOCK_GATING_B, tmp);
+
+		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+		tmp |= 0x1fe000;
+		tmp &= ~0xff000000;
+		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+		tmp |= 0x3fc;
+		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+	}
+}
+
+static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
+{
+	u32 orig, tmp;
+
+	/* LMI_MC/LMI_UMC always set in dynamic,
+	 * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
+	 */
+	tmp = RREG32(mmVCE_CLOCK_GATING_B);
+	tmp &= ~0x00060006;
+
+	/* Exception: the ECPU, IH, SEM and SYS blocks need to be turned on/off by SW */
+	if (gated) {
+		tmp |= 0xe10000;
+		WREG32(mmVCE_CLOCK_GATING_B, tmp);
+	} else {
+		tmp |= 0xe1;
+		tmp &= ~0xe10000;
+		WREG32(mmVCE_CLOCK_GATING_B, tmp);
+	}
+
+	orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+	tmp &= ~0x1fe000;
+	tmp &= ~0xff000000;
+	if (tmp != orig)
+		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+	orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+	tmp &= ~0x3fc;
+	if (tmp != orig)
+		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+
+	/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
+	WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
+
+	if (gated)
+		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+}
+
+static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
+								bool sw_cg)
+{
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
+		if (sw_cg)
+			vce_v2_0_set_sw_cg(adev, true);
+		else
+			vce_v2_0_set_dyn_cg(adev, true);
+	} else {
+		vce_v2_0_disable_cg(adev);
+
+		if (sw_cg)
+			vce_v2_0_set_sw_cg(adev, false);
+		else
+			vce_v2_0_set_dyn_cg(adev, false);
+	}
+}
+
 static int vce_v2_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -254,11 +475,8 @@ static int vce_v2_0_hw_init(void *handle)
 	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = vce_v2_0_start(adev);
-	/* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */
-	if (r)
-		return 0;
-
+	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+	vce_v2_0_enable_mgcg(adev, true, false);
 	for (i = 0; i < adev->vce.num_rings; i++)
 		adev->vce.ring[i].ready = false;
 
@@ -312,190 +530,6 @@ static int vce_v2_0_resume(void *handle)
 	return r;
 }
 
-static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
-{
-	u32 tmp;
-
-	if (gated) {
-		tmp = RREG32(mmVCE_CLOCK_GATING_B);
-		tmp |= 0xe70000;
-		WREG32(mmVCE_CLOCK_GATING_B, tmp);
-
-		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
-		tmp |= 0xff000000;
-		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
-		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-		tmp &= ~0x3fc;
-		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
-
-		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
-	} else {
-		tmp = RREG32(mmVCE_CLOCK_GATING_B);
-		tmp |= 0xe7;
-		tmp &= ~0xe70000;
-		WREG32(mmVCE_CLOCK_GATING_B, tmp);
-
-		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
-		tmp |= 0x1fe000;
-		tmp &= ~0xff000000;
-		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
-		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-		tmp |= 0x3fc;
-		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
-	}
-}
-
-static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
-{
-	if (vce_v2_0_wait_for_idle(adev)) {
-		DRM_INFO("VCE is busy, Can't set clock gateing");
-		return;
-	}
-
-	WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);
-
-	if (vce_v2_0_lmi_clean(adev)) {
-		DRM_INFO("LMI is busy, Can't set clock gateing");
-		return;
-	}
-
-	WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-	WREG32_P(mmVCE_SOFT_RESET,
-		 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-		 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-	WREG32(mmVCE_STATUS, 0);
-
-	if (gated)
-		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
-	/* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
-	if (gated) {
-		/* Force CLOCK OFF , set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
-		WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
-	} else {
-		/* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
-		WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
-	}
-
-	/* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0}*/;
-	WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);
-
-	/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
-	WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
-
-	WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
-	if(!gated) {
-		WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-		mdelay(100);
-		WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
-		vce_v2_0_firmware_loaded(adev);
-		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
-	}
-}
-
-static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
-{
-	WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
-}
-
-static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
-{
-	bool sw_cg = false;
-
-	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
-		if (sw_cg)
-			vce_v2_0_set_sw_cg(adev, true);
-		else
-			vce_v2_0_set_dyn_cg(adev, true);
-	} else {
-		vce_v2_0_disable_cg(adev);
-
-		if (sw_cg)
-			vce_v2_0_set_sw_cg(adev, false);
-		else
-			vce_v2_0_set_dyn_cg(adev, false);
-	}
-}
-
-static void vce_v2_0_init_cg(struct amdgpu_device *adev)
-{
-	u32 tmp;
-
-	tmp = RREG32(mmVCE_CLOCK_GATING_A);
-	tmp &= ~0xfff;
-	tmp |= ((0 << 0) | (4 << 4));
-	tmp |= 0x40000;
-	WREG32(mmVCE_CLOCK_GATING_A, tmp);
-
-	tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
-	tmp &= ~0xfff;
-	tmp |= ((0 << 0) | (4 << 4));
-	WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
-	tmp = RREG32(mmVCE_CLOCK_GATING_B);
-	tmp |= 0x10;
-	tmp &= ~0x100000;
-	WREG32(mmVCE_CLOCK_GATING_B, tmp);
-}
-
-static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
-{
-	uint64_t addr = adev->vce.gpu_addr;
-	uint32_t size;
-
-	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
-	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
-	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
-	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
-
-	WREG32(mmVCE_LMI_CTRL, 0x00398000);
-	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
-	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
-	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
-	WREG32(mmVCE_LMI_VM_CTRL, 0);
-
-	addr += AMDGPU_VCE_FIRMWARE_OFFSET;
-	size = VCE_V2_0_FW_SIZE;
-	WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
-	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
-
-	addr += size;
-	size = VCE_V2_0_STACK_SIZE;
-	WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
-	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
-
-	addr += size;
-	size = VCE_V2_0_DATA_SIZE;
-	WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
-	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
-
-	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
-
-	vce_v2_0_init_cg(adev);
-}
-
-static bool vce_v2_0_is_idle(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
-}
-
-static int vce_v2_0_wait_for_idle(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	unsigned i;
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		if (vce_v2_0_is_idle(handle))
-			return 0;
-	}
-	return -ETIMEDOUT;
-}
-
 static int vce_v2_0_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -539,33 +573,20 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
 	return 0;
 }
 
-static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
-	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
-	if (enable)
-		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-	else
-		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-
-	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
-
 static int vce_v2_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	bool gate = false;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
-
+	bool sw_cg = false;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	vce_v2_0_set_bypass_mode(adev, enable);
 
-	if (state == AMD_CG_STATE_GATE)
+	if (state == AMD_CG_STATE_GATE) {
 		gate = true;
+		sw_cg = true;
+	}
 
-	vce_v2_0_enable_mgcg(adev, gate);
+	vce_v2_0_enable_mgcg(adev, gate, sw_cg);
 
 	return 0;
 }
@@ -582,12 +603,8 @@ static int vce_v2_0_set_powergating_state(void *handle,
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
-		return 0;
-
 	if (state == AMD_PG_STATE_GATE)
-		/* XXX do we need a vce_v2_0_stop()? */
-		return 0;
+		return vce_v2_0_stop(adev);
 	else
 		return vce_v2_0_start(adev);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 8db26559fd1b..a8c40eebdd78 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -230,10 +230,6 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring;
 	int idx, r;
 
-	vce_v3_0_override_vce_clock_gating(adev, true);
-	if (!(adev->flags & AMD_IS_APU))
-		amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
-
 	ring = &adev->vce.ring[0];
 	WREG32(mmVCE_RB_RPTR, ring->wptr);
 	WREG32(mmVCE_RB_WPTR, ring->wptr);
@@ -436,9 +432,9 @@ static int vce_v3_0_hw_init(void *handle)
 	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = vce_v3_0_start(adev);
-	if (r)
-		return r;
+	vce_v3_0_override_vce_clock_gating(adev, true);
+	if (!(adev->flags & AMD_IS_APU))
+		amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 
 	for (i = 0; i < adev->vce.num_rings; i++)
 		adev->vce.ring[i].ready = false;
@@ -766,12 +762,11 @@ static int vce_v3_0_set_powergating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret = 0;
 
-	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
-		return 0;
-
 	if (state == AMD_PG_STATE_GATE) {
+		ret = vce_v3_0_stop(adev);
+		if (ret)
+			goto out;
 		adev->vce.is_powergated = true;
-		/* XXX do we need a vce_v3_0_stop()? */
 	} else {
 		ret = vce_v3_0_start(adev);
 		if (ret)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
index f9fd2ea4625b..dbc2e723f659 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
@@ -1310,5 +1310,6 @@
 #define ixROM_SW_DATA_62                                                        0xc060012c
 #define ixROM_SW_DATA_63                                                        0xc0600130
 #define ixROM_SW_DATA_64                                                        0xc0600134
+#define ixCURRENT_PG_STATUS                                                     0xc020029c
 
 #endif /* SMU_7_0_1_D_H */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index 3eccac735db3..b33935fcf428 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -161,28 +161,25 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 
-	if (cz_hwmgr->uvd_power_gated == bgate)
-		return 0;
-
 	cz_hwmgr->uvd_power_gated = bgate;
 
 	if (bgate) {
-		cgs_set_clockgating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_CG_STATE_GATE);
 		cgs_set_powergating_state(hwmgr->device,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_PG_STATE_GATE);
+		cgs_set_clockgating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_CG_STATE_GATE);
 		cz_dpm_update_uvd_dpm(hwmgr, true);
 		cz_dpm_powerdown_uvd(hwmgr);
 	} else {
 		cz_dpm_powerup_uvd(hwmgr);
-		cgs_set_powergating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_CG_STATE_UNGATE);
 		cgs_set_clockgating_state(hwmgr->device,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_CG_STATE_UNGATE);
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_PG_STATE_UNGATE);
 		cz_dpm_update_uvd_dpm(hwmgr, false);
 	}
 
@@ -193,47 +190,34 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_VCEPowerGating)) {
-		if (cz_hwmgr->vce_power_gated != bgate) {
-			if (bgate) {
-				cgs_set_clockgating_state(
-							hwmgr->device,
-							AMD_IP_BLOCK_TYPE_VCE,
-							AMD_CG_STATE_GATE);
-				cgs_set_powergating_state(
-							hwmgr->device,
-							AMD_IP_BLOCK_TYPE_VCE,
-							AMD_PG_STATE_GATE);
-				cz_enable_disable_vce_dpm(hwmgr, false);
-				cz_dpm_powerdown_vce(hwmgr);
-				cz_hwmgr->vce_power_gated = true;
-			} else {
-				cz_dpm_powerup_vce(hwmgr);
-				cz_hwmgr->vce_power_gated = false;
-				cgs_set_powergating_state(
-							hwmgr->device,
-							AMD_IP_BLOCK_TYPE_VCE,
-							AMD_CG_STATE_UNGATE);
-				cgs_set_clockgating_state(
-							hwmgr->device,
-							AMD_IP_BLOCK_TYPE_VCE,
-							AMD_PG_STATE_UNGATE);
-				cz_dpm_update_vce_dpm(hwmgr);
-				cz_enable_disable_vce_dpm(hwmgr, true);
-				return 0;
-			}
-		}
+	if (bgate) {
+		cgs_set_powergating_state(
+					hwmgr->device,
+					AMD_IP_BLOCK_TYPE_VCE,
+					AMD_PG_STATE_GATE);
+		cgs_set_clockgating_state(
+					hwmgr->device,
+					AMD_IP_BLOCK_TYPE_VCE,
+					AMD_CG_STATE_GATE);
+		cz_enable_disable_vce_dpm(hwmgr, false);
+		cz_dpm_powerdown_vce(hwmgr);
+		cz_hwmgr->vce_power_gated = true;
 	} else {
-		cz_hwmgr->vce_power_gated = bgate;
+		cz_dpm_powerup_vce(hwmgr);
+		cz_hwmgr->vce_power_gated = false;
+		cgs_set_clockgating_state(
+					hwmgr->device,
+					AMD_IP_BLOCK_TYPE_VCE,
+					AMD_CG_STATE_UNGATE);
+		cgs_set_powergating_state(
+					hwmgr->device,
+					AMD_IP_BLOCK_TYPE_VCE,
+					AMD_PG_STATE_UNGATE);
 		cz_dpm_update_vce_dpm(hwmgr);
-		cz_enable_disable_vce_dpm(hwmgr, !bgate);
+		cz_enable_disable_vce_dpm(hwmgr, true);
 		return 0;
 	}
 
-	if (!cz_hwmgr->vce_power_gated)
-		cz_dpm_update_vce_dpm(hwmgr);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index a1fc4fcac1e0..8cf71f3c6d0e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -147,22 +147,22 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 	data->uvd_power_gated = bgate;
 
 	if (bgate) {
-		cgs_set_clockgating_state(hwmgr->device,
-				AMD_IP_BLOCK_TYPE_UVD,
-				AMD_CG_STATE_GATE);
 		cgs_set_powergating_state(hwmgr->device,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_PG_STATE_GATE);
+		cgs_set_clockgating_state(hwmgr->device,
+				AMD_IP_BLOCK_TYPE_UVD,
+				AMD_CG_STATE_GATE);
 		smu7_update_uvd_dpm(hwmgr, true);
 		smu7_powerdown_uvd(hwmgr);
 	} else {
 		smu7_powerup_uvd(hwmgr);
-		cgs_set_powergating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_CG_STATE_UNGATE);
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_UVD,
 				AMD_CG_STATE_UNGATE);
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_CG_STATE_UNGATE);
 		smu7_update_uvd_dpm(hwmgr, false);
 	}
 
@@ -173,12 +173,12 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-	if (data->vce_power_gated == bgate)
-		return 0;
-
 	data->vce_power_gated = bgate;
 
 	if (bgate) {
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_VCE,
+						AMD_PG_STATE_GATE);
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_VCE,
 				AMD_CG_STATE_GATE);
@@ -186,10 +186,13 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 		smu7_powerdown_vce(hwmgr);
 	} else {
 		smu7_powerup_vce(hwmgr);
-		smu7_update_vce_dpm(hwmgr, false);
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_VCE,
 				AMD_CG_STATE_UNGATE);
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_VCE,
+						AMD_PG_STATE_UNGATE);
+		smu7_update_vce_dpm(hwmgr, false);
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 0a6c833720df..b1de9e8ccdbc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2624,6 +2624,7 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
 		smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
 		smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
 		smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
+
 		break;
 	case AMD_DPM_FORCED_LEVEL_MANUAL:
 		hwmgr->dpm_level = level;
@@ -2633,9 +2634,9 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
 		break;
 	}
 
-	if (level & (AMD_DPM_FORCED_LEVEL_PROFILE_PEAK | AMD_DPM_FORCED_LEVEL_HIGH))
+	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK &&
+	    hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
 		smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
-	else
+	else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK &&
+		 hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
 		smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
 
 	return 0;
@@ -4397,16 +4398,14 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
 		if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
 			return -EINVAL;
 		dep_sclk_table = table_info->vdd_dep_on_sclk;
-		for (i = 0; i < dep_sclk_table->count; i++) {
+		for (i = 0; i < dep_sclk_table->count; i++)
 			clocks->clock[i] = dep_sclk_table->entries[i].clk;
-			clocks->count++;
-		}
+		clocks->count = dep_sclk_table->count;
 	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
 		sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
-		for (i = 0; i < sclk_table->count; i++) {
+		for (i = 0; i < sclk_table->count; i++)
 			clocks->clock[i] = sclk_table->entries[i].clk;
-			clocks->count++;
-		}
+		clocks->count = sclk_table->count;
 	}
 
 	return 0;
@@ -4440,14 +4439,13 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
 			clocks->clock[i] = dep_mclk_table->entries[i].clk;
 			clocks->latency[i] = smu7_get_mem_latency(hwmgr,
 						dep_mclk_table->entries[i].clk);
-			clocks->count++;
 		}
+		clocks->count = dep_mclk_table->count;
 	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
 		mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
-		for (i = 0; i < mclk_table->count; i++) {
+		for (i = 0; i < mclk_table->count; i++)
 			clocks->clock[i] = mclk_table->entries[i].clk;
-			clocks->count++;
-		}
+		clocks->count = mclk_table->count;
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 60c36928284c..c0956a4207a9 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -37,8 +37,10 @@ MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
 
 
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 0b6eaa49a1db..8d8344ed655e 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -135,8 +135,7 @@ static int arcpgu_load(struct drm_device *drm)
 	drm_kms_helper_poll_init(drm);
 
 	arcpgu->fbdev = drm_fbdev_cma_init(drm, 16,
-					      drm->mode_config.num_crtc,
-					      drm->mode_config.num_connector);
+					   drm->mode_config.num_connector);
 	if (IS_ERR(arcpgu->fbdev)) {
 		ret = PTR_ERR(arcpgu->fbdev);
 		arcpgu->fbdev = NULL;
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index a2e5b04cdee3..4ce4f970920b 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -349,7 +349,7 @@ static int hdlcd_drm_bind(struct device *dev)
 	drm_mode_config_reset(drm);
 	drm_kms_helper_poll_init(drm);
 
-	hdlcd->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+	hdlcd->fbdev = drm_fbdev_cma_init(drm, 32,
 					  drm->mode_config.num_connector);
 
 	if (IS_ERR(hdlcd->fbdev)) {
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 99fb0ab39191..8b0672d4aee9 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -457,7 +457,7 @@ static int malidp_bind(struct device *dev)
 
 	drm_mode_config_reset(drm);
 
-	malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+	malidp->fbdev = drm_fbdev_cma_init(drm, 32,
 					   drm->mode_config.num_connector);
 
 	if (IS_ERR(malidp->fbdev)) {
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 78335100cbc3..0233e1dc33e1 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -137,7 +137,7 @@ int armada_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, fbh, 1, 1);
+	ret = drm_fb_helper_init(dev, fbh, 1);
 	if (ret) {
 		DRM_ERROR("failed to initialize drm fb helper\n");
 		goto err_fb_helper;
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index a293c8be232c..560d416deab2 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -148,8 +148,8 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 			return -ENOSPC;
 
 		mutex_lock(&priv->linear_lock);
-		ret = drm_mm_insert_node(&priv->linear, node, size, align,
-					 DRM_MM_SEARCH_DEFAULT);
+		ret = drm_mm_insert_node_generic(&priv->linear, node,
+						 size, align, 0, 0);
 		mutex_unlock(&priv->linear_lock);
 		if (ret) {
 			kfree(node);
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index b085140fae95..5d0ffab411a8 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -315,8 +315,7 @@ int ast_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, &afbdev->helper,
-				 1, 1);
+	ret = drm_fb_helper_init(dev, &afbdev->helper, 1);
 	if (ret)
 		goto free;
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 0bf32d6ac39b..427bdff425c2 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -647,7 +647,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 	platform_set_drvdata(pdev, dev);
 
 	dc->fbdev = drm_fbdev_cma_init(dev, 24,
-			dev->mode_config.num_crtc,
 			dev->mode_config.num_connector);
 	if (IS_ERR(dc->fbdev))
 		dc->fbdev = NULL;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 8a12b3f6fc66..aa342515ddf4 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -12,6 +12,10 @@
 
 #include "bochs.h"
 
+static int bochs_modeset = -1;
+module_param_named(modeset, bochs_modeset, int, 0444);
+MODULE_PARM_DESC(modeset, "enable/disable kernel modesetting");
+
 static bool enable_fbdev = true;
 module_param_named(fbdev, enable_fbdev, bool, 0444);
 MODULE_PARM_DESC(fbdev, "register fbdev device");
@@ -214,6 +218,12 @@ static struct pci_driver bochs_pci_driver = {
 
 static int __init bochs_init(void)
 {
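+	/* honor an explicit modeset=0 and, unless modeset=1 was forced,
+	 * the vgacon text-mode override
+	 */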
+	if (vgacon_text_force() && bochs_modeset == -1)
+		return -EINVAL;
+
+	if (bochs_modeset == 0)
+		return -EINVAL;
+
 	return drm_pci_init(&bochs_driver, &bochs_pci_driver);
 }
 
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 0317c3df6a22..932a769637ef 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -169,8 +169,7 @@ int bochs_fbdev_init(struct bochs_device *bochs)
 	drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper,
 			      &bochs_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
-				 1, 1);
+	ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, 1);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index b2c267df7ee7..cdd0a9d44ba1 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -9,6 +9,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <asm/unaligned.h>
+
 #include <drm/bridge/mhl.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
@@ -28,7 +30,10 @@
 
 #include "sil-sii8620.h"
 
-#define VAL_RX_HDMI_CTRL2_DEFVAL	VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
+#define SII8620_BURST_BUF_LEN 288
+#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
+#define MHL1_MAX_LCLK 225000
+#define MHL3_MAX_LCLK 600000
 
 enum sii8620_mode {
 	CM_DISCONNECTED,
@@ -59,6 +64,9 @@ struct sii8620 {
 	struct regulator_bulk_data supplies[2];
 	struct mutex lock; /* context lock, protects fields below */
 	int error;
+	int pixel_clock;
+	unsigned int use_packed_pixel:1;
+	int video_code;
 	enum sii8620_mode mode;
 	enum sii8620_sink_type sink_type;
 	u8 cbus_status;
@@ -66,11 +74,20 @@ struct sii8620 {
 	u8 xstat[MHL_XDS_SIZE];
 	u8 devcap[MHL_DCAP_SIZE];
 	u8 xdevcap[MHL_XDC_SIZE];
-	u8 avif[19];
+	u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
 	struct edid *edid;
 	unsigned int gen2_write_burst:1;
 	enum sii8620_mt_state mt_state;
 	struct list_head mt_queue;
+	struct {
+		int r_size;
+		int r_count;
+		int rx_ack;
+		int rx_count;
+		u8 rx_buf[32];
+		int tx_count;
+		u8 tx_buf[32];
+	} burst;
 };
 
 struct sii8620_mt_msg;
@@ -78,12 +95,15 @@ struct sii8620_mt_msg;
 typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx,
 				  struct sii8620_mt_msg *msg);
 
+typedef void (*sii8620_cb)(struct sii8620 *ctx, int ret);
+
 struct sii8620_mt_msg {
 	struct list_head node;
 	u8 reg[4];
 	u8 ret;
 	sii8620_mt_msg_cb send;
 	sii8620_mt_msg_cb recv;
+	sii8620_cb continuation;
 };
 
 static const u8 sii8620_i2c_page[] = {
@@ -101,6 +121,7 @@ static void sii8620_fetch_edid(struct sii8620 *ctx);
 static void sii8620_set_upstream_edid(struct sii8620 *ctx);
 static void sii8620_enable_hpd(struct sii8620 *ctx);
 static void sii8620_mhl_disconnected(struct sii8620 *ctx);
+static void sii8620_disconnect(struct sii8620 *ctx);
 
 static int sii8620_clear_error(struct sii8620 *ctx)
 {
@@ -227,6 +248,11 @@ static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val)
 	sii8620_write(ctx, addr, val);
 }
 
+static inline bool sii8620_is_mhl3(struct sii8620 *ctx)
+{
+	return ctx->mode >= CM_MHL3;
+}
+
 static void sii8620_mt_cleanup(struct sii8620 *ctx)
 {
 	struct sii8620_mt_msg *msg, *n;
@@ -251,9 +277,11 @@ static void sii8620_mt_work(struct sii8620 *ctx)
 		ctx->mt_state = MT_STATE_READY;
 		msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg,
 				       node);
+		list_del(&msg->node);
 		if (msg->recv)
 			msg->recv(ctx, msg);
-		list_del(&msg->node);
+		if (msg->continuation)
+			msg->continuation(ctx, msg->ret);
 		kfree(msg);
 	}
 
@@ -266,9 +294,59 @@ static void sii8620_mt_work(struct sii8620 *ctx)
 		msg->send(ctx, msg);
 }
 
+static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
+{
+	u8 ctrl = BIT_MDT_RCV_CTRL_MDT_RCV_EN;
+
+	if (ctx->gen2_write_burst)
+		return;
+
+	if (ctx->mode >= CM_MHL1)
+		ctrl |= BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN;
+
+	sii8620_write_seq(ctx,
+		REG_MDT_RCV_TIMEOUT, 100,
+		REG_MDT_RCV_CTRL, ctrl
+	);
+	ctx->gen2_write_burst = 1;
+}
+
+static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
+{
+	if (!ctx->gen2_write_burst)
+		return;
+
+	sii8620_write_seq_static(ctx,
+		REG_MDT_XMIT_CTRL, 0,
+		REG_MDT_RCV_CTRL, 0
+	);
+	ctx->gen2_write_burst = 0;
+}
+
+static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT
+			| BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR
+			| BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD
+			| BIT_MDT_XMIT_SM_ERROR,
+		REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY
+			| BIT_MDT_IDLE_AFTER_HAWB_DISABLE
+			| BIT_MDT_RFIFO_DATA_RDY
+	);
+	sii8620_enable_gen2_write_burst(ctx);
+}
+
 static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
 				    struct sii8620_mt_msg *msg)
 {
+	if (msg->reg[0] == MHL_SET_INT &&
+	    msg->reg[1] == MHL_INT_REG(RCHANGE) &&
+	    msg->reg[2] == MHL_INT_RC_FEAT_REQ)
+		sii8620_enable_gen2_write_burst(ctx);
+	else
+		sii8620_disable_gen2_write_burst(ctx);
+
 	switch (msg->reg[0]) {
 	case MHL_WRITE_STAT:
 	case MHL_SET_INT:
@@ -281,6 +359,12 @@ static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
 		sii8620_write(ctx, REG_MSC_COMMAND_START,
 			      BIT_MSC_COMMAND_START_MSC_MSG);
 		break;
+	case MHL_READ_DEVCAP_REG:
+	case MHL_READ_XDEVCAP_REG:
+		sii8620_write(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg[1]);
+		sii8620_write(ctx, REG_MSC_COMMAND_START,
+			      BIT_MSC_COMMAND_START_READ_DEVCAP);
+		break;
 	default:
 		dev_err(ctx->dev, "%s: command %#x not supported\n", __func__,
 			msg->reg[0]);
@@ -299,6 +383,21 @@ static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx)
 	return msg;
 }
 
+static void sii8620_mt_set_cont(struct sii8620 *ctx, sii8620_cb cont)
+{
+	struct sii8620_mt_msg *msg;
+
+	if (ctx->error)
+		return;
+
+	if (list_empty(&ctx->mt_queue)) {
+		ctx->error = -EINVAL;
+		return;
+	}
+	msg = list_last_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+	msg->continuation = cont;
+}
+
 static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2)
 {
 	struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
@@ -358,7 +457,7 @@ static void sii8620_update_array(u8 *dst, u8 *src, int count)
 	}
 }
 
-static void sii8620_mr_devcap(struct sii8620 *ctx)
+static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
 {
 	static const char * const sink_str[] = {
 		[SINK_NONE] = "NONE",
@@ -366,23 +465,10 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
 		[SINK_DVI] = "DVI"
 	};
 
-	u8 dcap[MHL_DCAP_SIZE];
 	char sink_name[20];
 	struct device *dev = ctx->dev;
 
-	sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
-	if (ctx->error < 0)
-		return;
-
-	dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap);
-	dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
-		 dcap[MHL_DCAP_MHL_VERSION] / 16,
-		 dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H],
-		 dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H],
-		 dcap[MHL_DCAP_DEVICE_ID_L]);
-	sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
-
-	if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK))
+	if (ret < 0)
 		return;
 
 	sii8620_fetch_edid(ctx);
@@ -401,18 +487,76 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
 
 	dev_info(dev, "detected sink(type: %s): %s\n",
 		 sink_str[ctx->sink_type], sink_name);
+}
+
+static void sii8620_hsic_init(struct sii8620 *ctx)
+{
+	if (!sii8620_is_mhl3(ctx))
+		return;
+
+	sii8620_write(ctx, REG_FCGC,
+		BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE);
+	sii8620_setbits(ctx, REG_HRXCTRL3,
+		BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0);
+	sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4);
+	sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0);
+	sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0);
+	sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST);
+	sii8620_write_seq_static(ctx,
+		REG_TDMLLCTL, 0,
+		REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST |
+			BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST,
+		REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST,
+		REG_HRXINTL, 0xff,
+		REG_HRXINTH, 0xff,
+		REG_TTXINTL, 0xff,
+		REG_TTXINTH, 0xff,
+		REG_TRXINTL, 0xff,
+		REG_TRXINTH, 0xff,
+		REG_HTXINTL, 0xff,
+		REG_HTXINTH, 0xff,
+		REG_FCINTR0, 0xff,
+		REG_FCINTR1, 0xff,
+		REG_FCINTR2, 0xff,
+		REG_FCINTR3, 0xff,
+		REG_FCINTR4, 0xff,
+		REG_FCINTR5, 0xff,
+		REG_FCINTR6, 0xff,
+		REG_FCINTR7, 0xff
+	);
+}
+
+static void sii8620_edid_read(struct sii8620 *ctx, int ret)
+{
+	if (ret < 0)
+		return;
+
 	sii8620_set_upstream_edid(ctx);
+	sii8620_hsic_init(ctx);
 	sii8620_enable_hpd(ctx);
 }
 
+static void sii8620_mr_devcap(struct sii8620 *ctx)
+{
+	u8 dcap[MHL_DCAP_SIZE];
+	struct device *dev = ctx->dev;
+
+	sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
+	if (ctx->error < 0)
+		return;
+
+	dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
+		 dcap[MHL_DCAP_MHL_VERSION] / 16,
+		 dcap[MHL_DCAP_MHL_VERSION] % 16,
+		 dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
+		 dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
+	sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+}
+
 static void sii8620_mr_xdevcap(struct sii8620 *ctx)
 {
 	sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap,
 			 MHL_XDC_SIZE);
-
-	sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE),
-			      MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT);
-	sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP);
 }
 
 static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx,
@@ -450,6 +594,197 @@ static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
 	msg->recv = sii8620_mt_read_devcap_recv;
 }
 
+static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx,
+		struct sii8620_mt_msg *msg)
+{
+	u8 reg = msg->reg[0] & 0x7f;
+
+	if (msg->reg[0] & 0x80)
+		ctx->xdevcap[reg] = msg->ret;
+	else
+		ctx->devcap[reg] = msg->ret;
+}
+
+static void sii8620_mt_read_devcap_reg(struct sii8620 *ctx, u8 reg)
+{
+	struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+	if (!msg)
+		return;
+
+	msg->reg[0] = (reg & 0x80) ? MHL_READ_XDEVCAP_REG : MHL_READ_DEVCAP_REG;
+	msg->reg[1] = reg;
+	msg->send = sii8620_mt_msc_cmd_send;
+	msg->recv = sii8620_mt_read_devcap_reg_recv;
+}
+
+static inline void sii8620_mt_read_xdevcap_reg(struct sii8620 *ctx, u8 reg)
+{
+	sii8620_mt_read_devcap_reg(ctx, reg | 0x80);
+}
+
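+/* eMSC TX framing, as consumed by sii8620_burst_send(): each queued
+ * block is laid out as [rx-ack byte][length byte][payload]
+ */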
+static void *sii8620_burst_get_tx_buf(struct sii8620 *ctx, int len)
+{
+	u8 *buf = &ctx->burst.tx_buf[ctx->burst.tx_count];
+	int size = len + 2;
+
+	if (ctx->burst.tx_count + size > ARRAY_SIZE(ctx->burst.tx_buf)) {
+		dev_err(ctx->dev, "TX-BLK buffer exhausted\n");
+		ctx->error = -EINVAL;
+		return NULL;
+	}
+
+	ctx->burst.tx_count += size;
+	buf[1] = len;
+
+	return buf + 2;
+}
+
+static u8 *sii8620_burst_get_rx_buf(struct sii8620 *ctx, int len)
+{
+	u8 *buf = &ctx->burst.rx_buf[ctx->burst.rx_count];
+	int size = len + 1;
+
+	if (ctx->burst.rx_count + size > ARRAY_SIZE(ctx->burst.rx_buf)) {
+		dev_err(ctx->dev, "RX-BLK buffer exhausted\n");
+		ctx->error = -EINVAL;
+		return NULL;
+	}
+
+	ctx->burst.rx_count += size;
+	buf[0] = len;
+
+	return buf + 1;
+}
+
+static void sii8620_burst_send(struct sii8620 *ctx)
+{
+	int tx_left = ctx->burst.tx_count;
+	u8 *d = ctx->burst.tx_buf;
+
+	while (tx_left > 0) {
+		int len = d[1] + 2;
+
+		if (ctx->burst.r_count + len > ctx->burst.r_size)
+			break;
+		d[0] = min(ctx->burst.rx_ack, 255);
+		ctx->burst.rx_ack -= d[0];
+		sii8620_write_buf(ctx, REG_EMSC_XMIT_WRITE_PORT, d, len);
+		ctx->burst.r_count += len;
+		tx_left -= len;
+		d += len;
+	}
+
+	ctx->burst.tx_count = tx_left;
+
+	while (ctx->burst.rx_ack > 0) {
+		u8 b[2] = { min(ctx->burst.rx_ack, 255), 0 };
+
+		if (ctx->burst.r_count + 2 > ctx->burst.r_size)
+			break;
+		ctx->burst.rx_ack -= b[0];
+		sii8620_write_buf(ctx, REG_EMSC_XMIT_WRITE_PORT, b, 2);
+		ctx->burst.r_count += 2;
+	}
+}
+
+static void sii8620_burst_receive(struct sii8620 *ctx)
+{
+	u8 buf[3], *d;
+	int count;
+
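+	/* the 16-bit RX FIFO byte count is read little-endian from EMSCRFIFOBCNTL */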
+	sii8620_read_buf(ctx, REG_EMSCRFIFOBCNTL, buf, 2);
+	count = get_unaligned_le16(buf);
+	while (count > 0) {
+		int len = min(count, 3);
+
+		sii8620_read_buf(ctx, REG_EMSC_RCV_READ_PORT, buf, len);
+		count -= len;
+		ctx->burst.rx_ack += len - 1;
+		ctx->burst.r_count -= buf[1];
+		if (ctx->burst.r_count < 0)
+			ctx->burst.r_count = 0;
+
+		if (len < 3 || !buf[2])
+			continue;
+
+		len = buf[2];
+		d = sii8620_burst_get_rx_buf(ctx, len);
+		if (!d)
+			continue;
+		sii8620_read_buf(ctx, REG_EMSC_RCV_READ_PORT, d, len);
+		count -= len;
+		ctx->burst.rx_ack += len;
+	}
+}
+
+static void sii8620_burst_tx_rbuf_info(struct sii8620 *ctx, int size)
+{
+	struct mhl_burst_blk_rcv_buffer_info *d =
+		sii8620_burst_get_tx_buf(ctx, sizeof(*d));
+	if (!d)
+		return;
+
+	d->id = cpu_to_be16(MHL_BURST_ID_BLK_RCV_BUFFER_INFO);
+	d->size = cpu_to_le16(size);
+}
+
+static u8 sii8620_checksum(void *ptr, int size)
+{
+	u8 *d = ptr, sum = 0;
+
+	while (size--)
+		sum += *d++;
+
+	return sum;
+}
+
+static void sii8620_mhl_burst_hdr_set(struct mhl3_burst_header *h,
+	enum mhl_burst_id id)
+{
+	h->id = cpu_to_be16(id);
+	h->total_entries = 1;
+	h->sequence_index = 1;
+}
+
+static void sii8620_burst_tx_bits_per_pixel_fmt(struct sii8620 *ctx, u8 fmt)
+{
+	struct mhl_burst_bits_per_pixel_fmt *d;
+	const int size = sizeof(*d) + sizeof(d->desc[0]);
+
+	d = sii8620_burst_get_tx_buf(ctx, size);
+	if (!d)
+		return;
+
+	sii8620_mhl_burst_hdr_set(&d->hdr, MHL_BURST_ID_BITS_PER_PIXEL_FMT);
+	d->num_entries = 1;
+	d->desc[0].stream_id = 0;
+	d->desc[0].pixel_format = fmt;
+	d->hdr.checksum -= sii8620_checksum(d, size);
+}
+
+static void sii8620_burst_rx_all(struct sii8620 *ctx)
+{
+	u8 *d = ctx->burst.rx_buf;
+	int count = ctx->burst.rx_count;
+
+	while (count-- > 0) {
+		int len = *d++;
+		int id = get_unaligned_be16(&d[0]);
+
+		switch (id) {
+		case MHL_BURST_ID_BLK_RCV_BUFFER_INFO:
+			ctx->burst.r_size = get_unaligned_le16(&d[2]);
+			break;
+		default:
+			break;
+		}
+		count -= len;
+		d += len;
+	}
+	ctx->burst.rx_count = 0;
+}
+
 static void sii8620_fetch_edid(struct sii8620 *ctx)
 {
 	u8 lm_ddc, ddc_cmd, int3, cbus;
@@ -537,12 +872,12 @@ static void sii8620_fetch_edid(struct sii8620 *ctx)
 				edid = new_edid;
 			}
 		}
-
-		if (fetched + FETCH_SIZE == edid_len)
-			sii8620_write(ctx, REG_INTR3, int3);
 	}
 
-	sii8620_write(ctx, REG_LM_DDC, lm_ddc);
+	sii8620_write_seq(ctx,
+		REG_INTR3_MASK, BIT_DDC_CMD_DONE,
+		REG_LM_DDC, lm_ddc
+	);
 
 end:
 	kfree(ctx->edid);
@@ -641,11 +976,10 @@ static void sii8620_hw_reset(struct sii8620 *ctx)
 
 static void sii8620_cbus_reset(struct sii8620 *ctx)
 {
-	sii8620_write_seq_static(ctx,
-		REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
-			| BIT_PWD_SRST_CBUS_RST_SW_EN,
-		REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN
-	);
+	sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
+		      | BIT_PWD_SRST_CBUS_RST_SW_EN);
+	usleep_range(10000, 20000);
+	sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN);
 }
 
 static void sii8620_set_auto_zone(struct sii8620 *ctx)
@@ -683,48 +1017,208 @@ static void sii8620_stop_video(struct sii8620 *ctx)
 			| BIT_TPI_SC_TPI_AV_MUTE;
 		break;
 	case SINK_HDMI:
+	default:
 		val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
 			| BIT_TPI_SC_TPI_AV_MUTE
 			| BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI;
 		break;
-	default:
-		return;
 	}
 
 	sii8620_write(ctx, REG_TPI_SC, val);
 }
 
+static void sii8620_set_format(struct sii8620 *ctx)
+{
+	u8 out_fmt;
+
+	if (sii8620_is_mhl3(ctx)) {
+		sii8620_setbits(ctx, REG_M3_P0CTRL,
+				BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
+				ctx->use_packed_pixel ? ~0 : 0);
+	} else {
+		if (ctx->use_packed_pixel)
+			sii8620_write_seq_static(ctx,
+				REG_VID_MODE, BIT_VID_MODE_M1080P,
+				REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
+				REG_MHLTX_CTL6, 0x60
+			);
+		else
+			sii8620_write_seq_static(ctx,
+				REG_VID_MODE, 0,
+				REG_MHL_TOP_CTL, 1,
+				REG_MHLTX_CTL6, 0xa0
+			);
+	}
+
+	if (ctx->use_packed_pixel)
+		out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) |
+			BIT_TPI_OUTPUT_CSCMODE709;
+	else
+		out_fmt = VAL_TPI_FORMAT(RGB, FULL);
+
+	sii8620_write_seq(ctx,
+		REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
+		REG_TPI_OUTPUT, out_fmt,
+	);
+}
+
+static int mhl3_infoframe_init(struct mhl3_infoframe *frame)
+{
+	memset(frame, 0, sizeof(*frame));
+
+	frame->version = 3;
+	frame->hev_format = -1;
+	return 0;
+}
+
+static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
+		 void *buffer, size_t size)
+{
+	const int frm_len = HDMI_INFOFRAME_HEADER_SIZE + MHL3_INFOFRAME_SIZE;
+	u8 *ptr = buffer;
+
+	if (size < frm_len)
+		return -ENOSPC;
+
+	memset(buffer, 0, size);
+	ptr[0] = HDMI_INFOFRAME_TYPE_VENDOR;
+	ptr[1] = frame->version;
+	ptr[2] = MHL3_INFOFRAME_SIZE;
+	ptr[4] = MHL3_IEEE_OUI & 0xff;
+	ptr[5] = (MHL3_IEEE_OUI >> 8) & 0xff;
+	ptr[6] = (MHL3_IEEE_OUI >> 16) & 0xff;
+	ptr[7] = frame->video_format & 0x3;
+	ptr[7] |= (frame->format_type & 0x7) << 2;
+	ptr[7] |= frame->sep_audio ? BIT(5) : 0;
+	if (frame->hev_format >= 0) {
+		ptr[9] = 1;
+		ptr[10] = (frame->hev_format >> 8) & 0xff;
+		ptr[11] = frame->hev_format & 0xff;
+	}
+	if (frame->av_delay) {
+		bool sign = frame->av_delay < 0;
+		int delay = sign ? -frame->av_delay : frame->av_delay;
+
+		ptr[12] = (delay >> 16) & 0xf;
+		if (sign)
+			ptr[12] |= BIT(4);
+		ptr[13] = (delay >> 8) & 0xff;
+		ptr[14] = delay & 0xff;
+	}
+	ptr[3] -= sii8620_checksum(buffer, frm_len);
+	return frm_len;
+}
+
+static void sii8620_set_infoframes(struct sii8620 *ctx)
+{
+	struct mhl3_infoframe mhl_frm;
+	union hdmi_infoframe frm;
+	u8 buf[31];
+	int ret;
+
+	if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
+		sii8620_write(ctx, REG_TPI_SC,
+			BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
+		sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3,
+			ARRAY_SIZE(ctx->avif) - 3);
+		sii8620_write(ctx, REG_PKT_FILTER_0,
+			BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
+			BIT_PKT_FILTER_0_DROP_MPEG_PKT |
+			BIT_PKT_FILTER_0_DROP_GCP_PKT,
+			BIT_PKT_FILTER_1_DROP_GEN_PKT);
+		return;
+	}
+
+	ret = hdmi_avi_infoframe_init(&frm.avi);
+	frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
+	frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
+	frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
+	frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709;
+	frm.avi.video_code = ctx->video_code;
+	if (!ret)
+		ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
+	if (ret > 0)
+		sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
+	sii8620_write(ctx, REG_PKT_FILTER_0,
+		BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
+		BIT_PKT_FILTER_0_DROP_MPEG_PKT |
+		BIT_PKT_FILTER_0_DROP_AVI_PKT |
+		BIT_PKT_FILTER_0_DROP_GCP_PKT,
+		BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS |
+		BIT_PKT_FILTER_1_DROP_GEN_PKT |
+		BIT_PKT_FILTER_1_DROP_VSIF_PKT);
+
+	sii8620_write(ctx, REG_TPI_INFO_FSEL, BIT_TPI_INFO_FSEL_EN
+		| BIT_TPI_INFO_FSEL_RPT | VAL_TPI_INFO_FSEL_VSI);
+	ret = mhl3_infoframe_init(&mhl_frm);
+	if (!ret)
+		ret = mhl3_infoframe_pack(&mhl_frm, buf, ARRAY_SIZE(buf));
+	sii8620_write_buf(ctx, REG_TPI_INFO_B0, buf, ret);
+}
+
 static void sii8620_start_hdmi(struct sii8620 *ctx)
 {
 	sii8620_write_seq_static(ctx,
 		REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL
 			| BIT_RX_HDMI_CTRL2_USE_AV_MUTE,
 		REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE
-			| BIT_VID_OVRRD_M1080P_OVRRD,
-		REG_VID_MODE, 0,
-		REG_MHL_TOP_CTL, 0x1,
-		REG_MHLTX_CTL6, 0xa0,
-		REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
-		REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
-	);
-
-	sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-			      MHL_DST_LM_CLK_MODE_NORMAL |
-			      MHL_DST_LM_PATH_ENABLED);
+			| BIT_VID_OVRRD_M1080P_OVRRD);
+	sii8620_set_format(ctx);
 
-	sii8620_set_auto_zone(ctx);
+	if (!sii8620_is_mhl3(ctx)) {
+		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+			MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED);
+		sii8620_set_auto_zone(ctx);
+	} else {
+		static const struct {
+			int max_clk;
+			u8 zone;
+			u8 link_rate;
+			u8 rrp_decode;
+		} clk_spec[] = {
+			{ 150000, VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS,
+			  MHL_XDS_LINK_RATE_1_5_GBPS, 0x38 },
+			{ 300000, VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS,
+			  MHL_XDS_LINK_RATE_3_0_GBPS, 0x40 },
+			{ 600000, VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS,
+			  MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
+		};
+		u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
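+		/* link clock in kHz: 3 bytes per pixel for RGB, 2 in
+		 * packed-pixel (YCbCr 4:2:2) mode; assumed to stay under
+		 * the 600 MHz table ceiling so the walk below terminates
+		 */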
+		int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(clk_spec); ++i)
+			if (clk < clk_spec[i].max_clk)
+				break;
 
-	sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
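+		/* within ~2% of the zone ceiling: allow the unlimited-rate mode */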
+		if (100 * clk >= 98 * clk_spec[i].max_clk)
+			p0_ctrl |= BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN;
 
-	sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif,
-			  ARRAY_SIZE(ctx->avif));
+		sii8620_burst_tx_bits_per_pixel_fmt(ctx, ctx->use_packed_pixel);
+		sii8620_burst_send(ctx);
+		sii8620_write_seq(ctx,
+			REG_MHL_DP_CTL0, 0xf0,
+			REG_MHL3_TX_ZONE_CTL, clk_spec[i].zone);
+		sii8620_setbits(ctx, REG_M3_P0CTRL,
+			BIT_M3_P0CTRL_MHL3_P0_PORT_EN
+			| BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN, p0_ctrl);
+		sii8620_setbits(ctx, REG_M3_POSTM, MSK_M3_POSTM_RRP_DECODE,
+			clk_spec[i].rrp_decode);
+		sii8620_write_seq_static(ctx,
+			REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE
+				| BIT_M3_CTRL_H2M_SWRST,
+			REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE
+		);
+		sii8620_mt_write_stat(ctx, MHL_XDS_REG(AVLINK_MODE_CONTROL),
+			clk_spec[i].link_rate);
+	}
 
-	sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2);
+	sii8620_set_infoframes(ctx);
 }
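
In the MHL3 branch above, the link clock is the pixel clock times the
bytes per pixel (2 in packed-pixel mode, 3 for unpacked 24bpp), and the
first clk_spec row whose max_clk exceeds it is selected. A worked
example, assuming 1080p60 (values in kHz):

	/*
	 * pixel_clock = 148500 (1080p60)
	 *
	 * unpacked: clk = 3 * 148500 = 445500  ->  6 Gbps zone
	 * packed:   clk = 2 * 148500 = 297000  ->  3 Gbps zone
	 *
	 * In the packed case 100 * 297000 >= 98 * 300000, i.e. clk is
	 * within 2% of the zone limit, so BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN
	 * is set as well.
	 */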
 
 static void sii8620_start_video(struct sii8620 *ctx)
 {
-	if (ctx->mode < CM_MHL3)
+	if (!sii8620_is_mhl3(ctx))
 		sii8620_stop_video(ctx);
 
 	switch (ctx->sink_type) {
@@ -757,44 +1251,6 @@ static void sii8620_enable_hpd(struct sii8620 *ctx)
 	);
 }
 
-static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
-{
-	if (ctx->gen2_write_burst)
-		return;
-
-	sii8620_write_seq_static(ctx,
-		REG_MDT_RCV_TIMEOUT, 100,
-		REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN
-	);
-	ctx->gen2_write_burst = 1;
-}
-
-static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
-{
-	if (!ctx->gen2_write_burst)
-		return;
-
-	sii8620_write_seq_static(ctx,
-		REG_MDT_XMIT_CTRL, 0,
-		REG_MDT_RCV_CTRL, 0
-	);
-	ctx->gen2_write_burst = 0;
-}
-
-static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
-{
-	sii8620_write_seq_static(ctx,
-		REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT
-			| BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR
-			| BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD
-			| BIT_MDT_XMIT_SM_ERROR,
-		REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY
-			| BIT_MDT_IDLE_AFTER_HAWB_DISABLE
-			| BIT_MDT_RFIFO_DATA_RDY
-	);
-	sii8620_enable_gen2_write_burst(ctx);
-}
-
 static void sii8620_mhl_discover(struct sii8620 *ctx)
 {
 	sii8620_write_seq_static(ctx,
@@ -838,7 +1294,7 @@ static void sii8620_mhl_discover(struct sii8620 *ctx)
 
 static void sii8620_peer_specific_init(struct sii8620 *ctx)
 {
-	if (ctx->mode == CM_MHL3)
+	if (sii8620_is_mhl3(ctx))
 		sii8620_write_seq_static(ctx,
 			REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD,
 			REG_EMSCINTRMASK1,
@@ -948,21 +1404,51 @@ static void sii8620_mhl_init(struct sii8620 *ctx)
 	);
 	sii8620_disable_gen2_write_burst(ctx);
 
-	/* currently MHL3 is not supported, so we force version to 0 */
-	sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0);
+	sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), SII8620_MHL_VERSION);
 	sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY),
 			      MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP
 			      | MHL_DST_CONN_POW_STAT);
 	sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG);
 }
 
+static void sii8620_emsc_enable(struct sii8620 *ctx)
+{
+	u8 reg;
+
+	sii8620_setbits(ctx, REG_GENCTL, BIT_GENCTL_EMSC_EN
+					 | BIT_GENCTL_CLR_EMSC_RFIFO
+					 | BIT_GENCTL_CLR_EMSC_XFIFO, ~0);
+	sii8620_setbits(ctx, REG_GENCTL, BIT_GENCTL_CLR_EMSC_RFIFO
+					 | BIT_GENCTL_CLR_EMSC_XFIFO, 0);
+	sii8620_setbits(ctx, REG_COMMECNT, BIT_COMMECNT_I2C_TO_EMSC_EN, ~0);
+	reg = sii8620_readb(ctx, REG_EMSCINTR);
+	sii8620_write(ctx, REG_EMSCINTR, reg);
+	sii8620_write(ctx, REG_EMSCINTRMASK, BIT_EMSCINTR_SPI_DVLD);
+}
+
+static int sii8620_wait_for_fsm_state(struct sii8620 *ctx, u8 state)
+{
+	int i;
+
+	for (i = 0; i < 10; ++i) {
+		u8 s = sii8620_readb(ctx, REG_COC_STAT_0);
+
+		if ((s & MSK_COC_STAT_0_FSM_STATE) == state)
+			return 0;
+		if (!(s & BIT_COC_STAT_0_PLL_LOCKED))
+			return -EBUSY;
+		usleep_range(4000, 6000);
+	}
+	return -ETIMEDOUT;
+}
+
 static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
 {
+	int ret;
+
 	if (ctx->mode == mode)
 		return;
 
-	ctx->mode = mode;
-
 	switch (mode) {
 	case CM_MHL1:
 		sii8620_write_seq_static(ctx,
@@ -972,15 +1458,46 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
 				| BIT_DPD_OSC_EN,
 			REG_COC_INTR_MASK, 0
 		);
+		ctx->mode = mode;
 		break;
 	case CM_MHL3:
+		sii8620_write(ctx, REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE);
+		ctx->mode = mode;
+		return;
+	case CM_ECBUS_S:
+		sii8620_emsc_enable(ctx);
 		sii8620_write_seq_static(ctx,
-			REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
-			REG_COC_CTL0, 0x40,
-			REG_MHL_COC_CTL1, 0x07
+			REG_TTXSPINUMS, 4,
+			REG_TRXSPINUMS, 4,
+			REG_TTXHSICNUMS, 0x14,
+			REG_TRXHSICNUMS, 0x14,
+			REG_TTXTOTNUMS, 0x18,
+			REG_TRXTOTNUMS, 0x18,
+			REG_PWD_SRST, BIT_PWD_SRST_COC_DOC_RST
+				      | BIT_PWD_SRST_CBUS_RST_SW_EN,
+			REG_MHL_COC_CTL1, 0xbd,
+			REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN,
+			REG_COC_CTLB, 0x01,
+			REG_COC_CTL0, 0x5c,
+			REG_COC_CTL14, 0x03,
+			REG_COC_CTL15, 0x80,
+			REG_MHL_DP_CTL6, BIT_MHL_DP_CTL6_DP_TAP1_SGN
+					 | BIT_MHL_DP_CTL6_DP_TAP1_EN
+					 | BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN,
+			REG_MHL_DP_CTL8, 0x03
 		);
-		break;
+		ret = sii8620_wait_for_fsm_state(ctx, 0x03);
+		sii8620_write_seq_static(ctx,
+			REG_COC_CTL14, 0x00,
+			REG_COC_CTL15, 0x80
+		);
+		if (!ret)
+			sii8620_write(ctx, REG_CBUS3_CNVT, 0x85);
+		else
+			sii8620_disconnect(ctx);
+		return;
 	case CM_DISCONNECTED:
+		ctx->mode = mode;
 		break;
 	default:
 		dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode);
@@ -1007,10 +1524,12 @@ static void sii8620_disconnect(struct sii8620 *ctx)
 {
 	sii8620_disable_gen2_write_burst(ctx);
 	sii8620_stop_video(ctx);
-	msleep(50);
+	msleep(100);
 	sii8620_cbus_reset(ctx);
 	sii8620_set_mode(ctx, CM_DISCONNECTED);
 	sii8620_write_seq_static(ctx,
+		REG_TX_ZONE_CTL1, 0,
+		REG_MHL_PLL_CTL0, 0x07,
 		REG_COC_CTL0, 0x40,
 		REG_CBUS3_CNVT, 0x84,
 		REG_COC_CTL14, 0x00,
@@ -1123,24 +1642,45 @@ static void sii8620_irq_disc(struct sii8620 *ctx)
 	sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat);
 }
 
+static void sii8620_read_burst(struct sii8620 *ctx)
+{
+	u8 buf[17];
+
+	sii8620_read_buf(ctx, REG_MDT_RCV_READ_PORT, buf, ARRAY_SIZE(buf));
+	sii8620_write(ctx, REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN |
+		      BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN |
+		      BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR);
+	sii8620_readb(ctx, REG_MDT_RFIFO_STAT);
+}
+
 static void sii8620_irq_g2wb(struct sii8620 *ctx)
 {
 	u8 stat = sii8620_readb(ctx, REG_MDT_INT_0);
 
 	if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE)
-		dev_dbg(ctx->dev, "HAWB idle\n");
+		if (sii8620_is_mhl3(ctx))
+			sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
+				MHL_INT_RC_FEAT_COMPLETE);
+
+	if (stat & BIT_MDT_RFIFO_DATA_RDY)
+		sii8620_read_burst(ctx);
+
+	if (stat & BIT_MDT_XFIFO_EMPTY)
+		sii8620_write(ctx, REG_MDT_XMIT_CTRL, 0);
 
 	sii8620_write(ctx, REG_MDT_INT_0, stat);
 }
 
-static void sii8620_status_changed_dcap(struct sii8620 *ctx)
+static void sii8620_status_dcap_ready(struct sii8620 *ctx)
 {
-	if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) {
-		sii8620_set_mode(ctx, CM_MHL1);
-		sii8620_peer_specific_init(ctx);
-		sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE
-			       | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR);
-	}
+	enum sii8620_mode mode;
+
+	mode = ctx->stat[MHL_DST_VERSION] >= 0x30 ? CM_MHL3 : CM_MHL1;
+	if (mode > ctx->mode)
+		sii8620_set_mode(ctx, mode);
+	sii8620_peer_specific_init(ctx);
+	sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE
+		      | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR);
 }
 
 static void sii8620_status_changed_path(struct sii8620 *ctx)
@@ -1149,7 +1689,9 @@ static void sii8620_status_changed_path(struct sii8620 *ctx)
 		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
 				      MHL_DST_LM_CLK_MODE_NORMAL
 				      | MHL_DST_LM_PATH_ENABLED);
-		sii8620_mt_read_devcap(ctx, false);
+		if (!sii8620_is_mhl3(ctx))
+			sii8620_mt_read_devcap(ctx, false);
+		sii8620_mt_set_cont(ctx, sii8620_sink_detected);
 	} else {
 		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
 				      MHL_DST_LM_CLK_MODE_NORMAL);
@@ -1166,19 +1708,75 @@ static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
 	sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
 	sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
 
-	if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
-		sii8620_status_changed_dcap(ctx);
+	if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+		sii8620_status_dcap_ready(ctx);
 
 	if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
 		sii8620_status_changed_path(ctx);
 }
 
+static void sii8620_ecbus_up(struct sii8620 *ctx, int ret)
+{
+	if (ret < 0)
+		return;
+
+	sii8620_set_mode(ctx, CM_ECBUS_S);
+}
+
+static void sii8620_got_ecbus_speed(struct sii8620 *ctx, int ret)
+{
+	if (ret < 0)
+		return;
+
+	sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE),
+			      MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT);
+	sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP);
+	sii8620_mt_set_cont(ctx, sii8620_ecbus_up);
+}
+
+static void sii8620_mhl_burst_emsc_support_set(struct mhl_burst_emsc_support *d,
+	enum mhl_burst_id id)
+{
+	sii8620_mhl_burst_hdr_set(&d->hdr, MHL_BURST_ID_EMSC_SUPPORT);
+	d->num_entries = 1;
+	d->burst_id[0] = cpu_to_be16(id);
+}
+
+static void sii8620_send_features(struct sii8620 *ctx)
+{
+	u8 buf[16];
+
+	sii8620_write(ctx, REG_MDT_XMIT_CTRL, BIT_MDT_XMIT_CTRL_EN
+		| BIT_MDT_XMIT_CTRL_FIXED_BURST_LEN);
+	sii8620_mhl_burst_emsc_support_set((void *)buf,
+		MHL_BURST_ID_HID_PAYLOAD);
+	sii8620_write_buf(ctx, REG_MDT_XMIT_WRITE_PORT, buf, ARRAY_SIZE(buf));
+}
+
 static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
 {
 	u8 ints[MHL_INT_SIZE];
 
 	sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
 	sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+
+	if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_DCAP_CHG) {
+		switch (ctx->mode) {
+		case CM_MHL3:
+			sii8620_mt_read_xdevcap_reg(ctx, MHL_XDC_ECBUS_SPEEDS);
+			sii8620_mt_set_cont(ctx, sii8620_got_ecbus_speed);
+			break;
+		case CM_ECBUS_S:
+			sii8620_mt_read_devcap(ctx, true);
+			break;
+		default:
+			break;
+		}
+	}
+	if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ)
+		sii8620_send_features(ctx);
+	if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE)
+		sii8620_edid_read(ctx, 0);
 }
 
 static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
@@ -1261,6 +1859,19 @@ static void sii8620_irq_coc(struct sii8620 *ctx)
 {
 	u8 stat = sii8620_readb(ctx, REG_COC_INTR);
 
+	if (stat & BIT_COC_CALIBRATION_DONE) {
+		u8 cstat = sii8620_readb(ctx, REG_COC_STAT_0);
+
+		cstat &= BIT_COC_STAT_0_PLL_LOCKED | MSK_COC_STAT_0_FSM_STATE;
+		if (cstat == (BIT_COC_STAT_0_PLL_LOCKED | 0x02)) {
+			sii8620_write_seq_static(ctx,
+				REG_COC_CTLB, 0,
+				REG_TRXINTMH, BIT_TDM_INTR_SYNC_DATA
+					      | BIT_TDM_INTR_SYNC_WAIT
+			);
+		}
+	}
+
 	sii8620_write(ctx, REG_COC_INTR, stat);
 }
 
@@ -1289,17 +1900,6 @@ static void sii8620_scdt_high(struct sii8620 *ctx)
 	);
 }
 
-static void sii8620_scdt_low(struct sii8620 *ctx)
-{
-	sii8620_write(ctx, REG_TMDS_CSTAT_P3,
-		      BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS |
-		      BIT_TMDS_CSTAT_P3_CLR_AVI);
-
-	sii8620_stop_video(ctx);
-
-	sii8620_write(ctx, REG_INTR8_MASK, 0);
-}
-
 static void sii8620_irq_scdt(struct sii8620 *ctx)
 {
 	u8 stat = sii8620_readb(ctx, REG_INTR5);
@@ -1309,8 +1909,6 @@ static void sii8620_irq_scdt(struct sii8620 *ctx)
 
 		if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
 			sii8620_scdt_high(ctx);
-		else
-			sii8620_scdt_low(ctx);
 	}
 
 	sii8620_write(ctx, REG_INTR5, stat);
@@ -1351,6 +1949,65 @@ static void sii8620_irq_infr(struct sii8620 *ctx)
 		sii8620_start_video(ctx);
 }
 
+static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret)
+{
+	if (ret < 0)
+		return;
+
+	sii8620_mt_read_devcap(ctx, false);
+}
+
+static void sii8620_irq_tdm(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_TRXINTH);
+	u8 tdm = sii8620_readb(ctx, REG_TRXSTA2);
+
+	if ((tdm & MSK_TDM_SYNCHRONIZED) == VAL_TDM_SYNCHRONIZED) {
+		ctx->mode = CM_ECBUS_S;
+		ctx->burst.rx_ack = 0;
+		ctx->burst.r_size = SII8620_BURST_BUF_LEN;
+		sii8620_burst_tx_rbuf_info(ctx, SII8620_BURST_BUF_LEN);
+		sii8620_mt_read_devcap(ctx, true);
+		sii8620_mt_set_cont(ctx, sii8620_got_xdevcap);
+	} else {
+		sii8620_write_seq_static(ctx,
+			REG_MHL_PLL_CTL2, 0,
+			REG_MHL_PLL_CTL2, BIT_MHL_PLL_CTL2_CLKDETECT_EN
+		);
+	}
+
+	sii8620_write(ctx, REG_TRXINTH, stat);
+}
+
+static void sii8620_irq_block(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_EMSCINTR);
+
+	if (stat & BIT_EMSCINTR_SPI_DVLD) {
+		u8 bstat = sii8620_readb(ctx, REG_SPIBURSTSTAT);
+
+		if (bstat & BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE)
+			sii8620_burst_receive(ctx);
+	}
+
+	sii8620_write(ctx, REG_EMSCINTR, stat);
+}
+
+static void sii8620_irq_ddc(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_INTR3);
+
+	if (stat & BIT_DDC_CMD_DONE) {
+		sii8620_write(ctx, REG_INTR3_MASK, 0);
+		if (sii8620_is_mhl3(ctx))
+			sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
+					   MHL_INT_RC_FEAT_REQ);
+		else
+			sii8620_edid_read(ctx, 0);
+	}
+	sii8620_write(ctx, REG_INTR3, stat);
+}
+
 /* endian agnostic, non-volatile version of test_bit */
 static bool sii8620_test_bit(unsigned int nr, const u8 *addr)
 {
@@ -1366,9 +2023,12 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
 		{ BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc },
 		{ BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb },
 		{ BIT_FAST_INTR_STAT_COC, sii8620_irq_coc },
+		{ BIT_FAST_INTR_STAT_TDM, sii8620_irq_tdm },
 		{ BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc },
 		{ BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr },
+		{ BIT_FAST_INTR_STAT_BLOCK, sii8620_irq_block },
 		{ BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
+		{ BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc },
 		{ BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
 		{ BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
 	};
@@ -1383,7 +2043,9 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
 		if (sii8620_test_bit(irq_vec[i].bit, stats))
 			irq_vec[i].handler(ctx);
 
+	sii8620_burst_rx_all(ctx);
 	sii8620_mt_work(ctx);
+	sii8620_burst_send(ctx);
 
 	ret = sii8620_clear_error(ctx);
 	if (ret) {
@@ -1450,22 +2112,41 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
 			       struct drm_display_mode *adjusted_mode)
 {
 	struct sii8620 *ctx = bridge_to_sii8620(bridge);
-	bool ret = false;
-	int max_clock = 74250;
+	int max_lclk;
+	bool ret = true;
 
 	mutex_lock(&ctx->lock);
 
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		goto out;
-
-	if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL)
-		max_clock = 300000;
-
-	ret = mode->clock <= max_clock;
-
-out:
+	max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK;
+	if (max_lclk > 3 * adjusted_mode->clock) {
+		ctx->use_packed_pixel = 0;
+		goto end;
+	}
+	if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) &&
+	    max_lclk > 2 * adjusted_mode->clock) {
+		ctx->use_packed_pixel = 1;
+		goto end;
+	}
+	ret = false;
+end:
+	if (ret) {
+		u8 vic = drm_match_cea_mode(adjusted_mode);
+
+		if (!vic) {
+			union hdmi_infoframe frm;
+			u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
+
+			drm_hdmi_vendor_infoframe_from_display_mode(
+				&frm.vendor.hdmi, adjusted_mode);
+			vic = frm.vendor.hdmi.vic;
+			if (vic >= ARRAY_SIZE(mhl_vic))
+				vic = 0;
+			vic = mhl_vic[vic];
+		}
+		ctx->video_code = vic;
+		ctx->pixel_clock = adjusted_mode->clock;
+	}
 	mutex_unlock(&ctx->lock);
-
 	return ret;
 }
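
The fixup accepts a mode when its link clock fits the negotiated MHL
generation, falling back to packed pixel only when the sink advertises
MHL_DCAP_VID_LINK_PPIXEL. A worked example, assuming the driver's limits
of MHL1_MAX_LCLK = 225000 and MHL3_MAX_LCLK = 600000 (kHz):

	/*
	 * 720p60,  clock =  74250: 3 *  74250 = 222750 < 225000
	 *          -> accepted unpacked even on MHL1
	 * 1080p60, clock = 148500: 3 * 148500 = 445500 > 225000 and
	 *          2 * 148500 = 297000 > 225000
	 *          -> rejected on MHL1, accepted unpacked on MHL3
	 */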
 
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.h b/drivers/gpu/drm/bridge/sil-sii8620.h
index 6ff616a4f6ce..51ab540cf092 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.h
+++ b/drivers/gpu/drm/bridge/sil-sii8620.h
@@ -353,7 +353,7 @@
 #define REG_TTXNUMB				0x0116
 #define MSK_TTXNUMB_TTX_AFFCTRL_3_0		0xf0
 #define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT	BIT(3)
-#define MSK_TTXNUMB_TTX_NUMBPS_2_0		0x07
+#define MSK_TTXNUMB_TTX_NUMBPS			0x07
 
 /* TDM TX NUMSPISYM, default value: 0x04 */
 #define REG_TTXSPINUMS				0x0117
@@ -403,12 +403,16 @@
 
 /* TDM RX Status 2nd, default value: 0x00 */
 #define REG_TRXSTA2				0x015c
+#define MSK_TDM_SYNCHRONIZED			0xc0
+#define VAL_TDM_SYNCHRONIZED			0x80
 
 /* TDM RX INT Low, default value: 0x00 */
 #define REG_TRXINTL				0x0163
 
 /* TDM RX INT High, default value: 0x00 */
 #define REG_TRXINTH				0x0164
+#define BIT_TDM_INTR_SYNC_DATA			BIT(0)
+#define BIT_TDM_INTR_SYNC_WAIT			BIT(1)
 
 /* TDM RX INTMASK High, default value: 0x00 */
 #define REG_TRXINTMH				0x0166
@@ -429,12 +433,14 @@
 
 /* HSIC Keeper, default value: 0x00 */
 #define REG_KEEPER				0x0181
-#define MSK_KEEPER_KEEPER_MODE_1_0		0x03
+#define MSK_KEEPER_MODE				0x03
+#define VAL_KEEPER_MODE_HOST			0
+#define VAL_KEEPER_MODE_DEVICE			2
 
 /* HSIC Flow Control General, default value: 0x02 */
 #define REG_FCGC				0x0183
-#define BIT_FCGC_HSIC_FC_HOSTMODE		BIT(1)
-#define BIT_FCGC_HSIC_FC_ENABLE			BIT(0)
+#define BIT_FCGC_HSIC_HOSTMODE			BIT(1)
+#define BIT_FCGC_HSIC_ENABLE			BIT(0)
 
 /* HSIC Flow Control CTR13, default value: 0xfc */
 #define REG_FCCTR13				0x0191
@@ -841,6 +847,8 @@
 #define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL	0xf0
 #define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL		0x0f
 
+#define REG_MHL_DP_CTL8				0x0352
+
 /* Tx Zone Ctl1, default value: 0x00 */
 #define REG_TX_ZONE_CTL1			0x0361
 #define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE	0x08
@@ -1078,16 +1086,26 @@
 
 /* TPI Info Frame Select, default value: 0x00 */
 #define REG_TPI_INFO_FSEL			0x06bf
-#define BIT_TPI_INFO_FSEL_TPI_INFO_EN		BIT(7)
-#define BIT_TPI_INFO_FSEL_TPI_INFO_RPT		BIT(6)
-#define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG	BIT(5)
-#define MSK_TPI_INFO_FSEL_TPI_INFO_SEL		0x07
+#define BIT_TPI_INFO_FSEL_EN			BIT(7)
+#define BIT_TPI_INFO_FSEL_RPT			BIT(6)
+#define BIT_TPI_INFO_FSEL_READ_FLAG		BIT(5)
+#define MSK_TPI_INFO_FSEL_PKT			0x07
+#define VAL_TPI_INFO_FSEL_AVI			0x00
+#define VAL_TPI_INFO_FSEL_SPD			0x01
+#define VAL_TPI_INFO_FSEL_AUD			0x02
+#define VAL_TPI_INFO_FSEL_MPG			0x03
+#define VAL_TPI_INFO_FSEL_GEN			0x04
+#define VAL_TPI_INFO_FSEL_GEN2			0x05
+#define VAL_TPI_INFO_FSEL_VSI			0x06
 
 /* TPI Info Byte #0, default value: 0x00 */
 #define REG_TPI_INFO_B0				0x06c0
 
 /* CoC Status, default value: 0x00 */
 #define REG_COC_STAT_0				0x0700
+#define BIT_COC_STAT_0_PLL_LOCKED		BIT(7)
+#define MSK_COC_STAT_0_FSM_STATE		0x0f
+
 #define REG_COC_STAT_1				0x0701
 #define REG_COC_STAT_2				0x0702
 #define REG_COC_STAT_3				0x0703
@@ -1282,14 +1300,14 @@
 
 /* MDT Transmit Control, default value: 0x70 */
 #define REG_MDT_XMIT_CTRL			0x0588
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN		BIT(7)
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN	BIT(6)
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5)
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID	BIT(4)
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3)
-#define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT	BIT(2)
-#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL	BIT(1)
-#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR	BIT(0)
+#define BIT_MDT_XMIT_CTRL_EN			BIT(7)
+#define BIT_MDT_XMIT_CTRL_CMD_MERGE_EN		BIT(6)
+#define BIT_MDT_XMIT_CTRL_FIXED_BURST_LEN	BIT(5)
+#define BIT_MDT_XMIT_CTRL_FIXED_AID		BIT(4)
+#define BIT_MDT_XMIT_CTRL_SINGLE_RUN_EN		BIT(3)
+#define BIT_MDT_XMIT_CTRL_CLR_ABORT_WAIT	BIT(2)
+#define BIT_MDT_XMIT_CTRL_XFIFO_CLR_ALL		BIT(1)
+#define BIT_MDT_XMIT_CTRL_XFIFO_CLR_CUR		BIT(0)
 
 /* MDT Receive WRITE Port, default value: 0x00 */
 #define REG_MDT_XMIT_WRITE_PORT			0x0589
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 79a5cd108245..4cc679278182 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -289,7 +289,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
 			      &cirrus_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
-				 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
+				 CIRRUSFB_CONN_LIMIT);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index e5b738660d66..c97588a28216 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -307,9 +307,8 @@ static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
  * @state: the CRTC whose incoming state to update
  * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
  *
- * Set a mode (originating from the kernel) on the desired CRTC state. Does
- * not change any other state properties, including enable, active, or
- * mode_changed.
+ * Set a mode (originating from the kernel) on the desired CRTC state and update
+ * the enable property.
  *
  * RETURNS:
  * Zero on success, error code on failure. Cannot return -EDEADLK.
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 9a08445a7a7a..01d936b7be43 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -369,7 +369,7 @@ mode_fixup(struct drm_atomic_state *state)
 	struct drm_connector *connector;
 	struct drm_connector_state *conn_state;
 	int i;
-	bool ret;
+	int ret;
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		if (!crtc_state->mode_changed &&
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 789b4c65cd69..cc23b9a505c0 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -88,6 +88,30 @@
  */
 
 /**
+ * drm_color_lut_extract - clamp and round LUT entries
+ * @user_input: input value
+ * @bit_precision: number of bits the hw LUT supports
+ *
+ * Extract a degamma/gamma LUT value provided by the user (in the form of
+ * &drm_color_lut entries) and round it to the precision supported by the
+ * hardware.
+ */
+uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision)
+{
+	uint32_t val = user_input;
+	uint32_t max = 0xffff >> (16 - bit_precision);
+
+	/* Round only if we're not using full precision. */
+	if (bit_precision < 16) {
+		val += 1UL << (16 - bit_precision - 1);
+		val >>= 16 - bit_precision;
+	}
+
+	return clamp_val(val, 0, max);
+}
+EXPORT_SYMBOL(drm_color_lut_extract);
+
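
drm_color_lut_extract() rounds by adding half an output LSB before
shifting: at 10 bits of precision, 0x8000 becomes (0x8000 + 0x20) >> 6 =
0x200, and 0xffff becomes (0xffff + 0x20) >> 6 = 0x400, clamped back to
the 0x3ff maximum. A minimal sketch of a driver loading a 10-bit hardware
gamma LUT with it; everything except drm_color_lut_extract() is
illustrative:

	static void load_gamma_lut(struct drm_crtc_state *state)
	{
		struct drm_color_lut *lut = state->gamma_lut->data;
		int i, n = state->gamma_lut->length / sizeof(*lut);

		for (i = 0; i < n; i++) {
			u32 r = drm_color_lut_extract(lut[i].red, 10);
			u32 g = drm_color_lut_extract(lut[i].green, 10);
			u32 b = drm_color_lut_extract(lut[i].blue, 10);

			write_lut_entry(i, r, g, b); /* hypothetical hw write */
		}
	}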
+/**
  * drm_crtc_enable_color_mgmt - enable color management properties
  * @crtc: DRM CRTC
  * @degamma_lut_size: the size of the degamma lut (before CSC)
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 1bdcfd566695..955c5690bf64 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -207,3 +207,6 @@ int drm_mode_cursor2_ioctl(struct drm_device *dev,
 			   void *data, struct drm_file *file_priv);
 int drm_mode_page_flip_ioctl(struct drm_device *dev,
 			     void *data, struct drm_file *file_priv);
+
+/* drm_edid.c */
+void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6cbd67f4fbc5..45ce224688ce 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -465,7 +465,10 @@ static void drm_fs_inode_free(struct inode *inode)
  * that do embed &struct drm_device it must be placed first in the overall
  * structure, and the overall structure must be allocated using kmalloc(): The
  * drm core's release function unconditionally calls kfree() on the @dev pointer
- * when the final reference is released.
+ * when the final reference is released. To override this behaviour, and so
+ * allow embedding of the drm_device inside the driver's device struct at an
+ * arbitrary offset, you must supply a &drm_driver.release callback and control
+ * the finalization explicitly.
  *
  * RETURNS:
  * 0 on success, or error code on failure.
@@ -553,6 +556,41 @@ err_free:
 EXPORT_SYMBOL(drm_dev_init);
 
 /**
+ * drm_dev_fini - Finalize a dead DRM device
+ * @dev: DRM device
+ *
+ * Finalize a dead DRM device. This is the converse of drm_dev_init() and
+ * frees up all data allocated by it. All driver private data should be
+ * finalized first. Note that this function does not free @dev; that is
+ * left to the caller.
+ *
+ * The ref-count of @dev must be zero, and drm_dev_fini() should only be called
+ * from a &drm_driver.release callback.
+ */
+void drm_dev_fini(struct drm_device *dev)
+{
+	drm_vblank_cleanup(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_GEM))
+		drm_gem_destroy(dev);
+
+	drm_legacy_ctxbitmap_cleanup(dev);
+	drm_ht_remove(&dev->map_hash);
+	drm_fs_inode_free(dev->anon_inode);
+
+	drm_minor_free(dev, DRM_MINOR_PRIMARY);
+	drm_minor_free(dev, DRM_MINOR_RENDER);
+	drm_minor_free(dev, DRM_MINOR_CONTROL);
+
+	mutex_destroy(&dev->master_mutex);
+	mutex_destroy(&dev->ctxlist_mutex);
+	mutex_destroy(&dev->filelist_mutex);
+	mutex_destroy(&dev->struct_mutex);
+	kfree(dev->unique);
+}
+EXPORT_SYMBOL(drm_dev_fini);
+
+/**
  * drm_dev_alloc - Allocate new DRM device
  * @driver: DRM driver to allocate device for
  * @parent: Parent device object
@@ -598,25 +636,12 @@ static void drm_dev_release(struct kref *ref)
 {
 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
 
-	drm_vblank_cleanup(dev);
-
-	if (drm_core_check_feature(dev, DRIVER_GEM))
-		drm_gem_destroy(dev);
-
-	drm_legacy_ctxbitmap_cleanup(dev);
-	drm_ht_remove(&dev->map_hash);
-	drm_fs_inode_free(dev->anon_inode);
-
-	drm_minor_free(dev, DRM_MINOR_PRIMARY);
-	drm_minor_free(dev, DRM_MINOR_RENDER);
-	drm_minor_free(dev, DRM_MINOR_CONTROL);
-
-	mutex_destroy(&dev->master_mutex);
-	mutex_destroy(&dev->ctxlist_mutex);
-	mutex_destroy(&dev->filelist_mutex);
-	mutex_destroy(&dev->struct_mutex);
-	kfree(dev->unique);
-	kfree(dev);
+	if (dev->driver->release) {
+		dev->driver->release(dev);
+	} else {
+		drm_dev_fini(dev);
+		kfree(dev);
+	}
 }
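
With the &drm_driver.release hook consulted above, a driver may embed the
drm_device at any offset inside its own structure and finalize it itself.
A minimal sketch (the my_* names are illustrative):

	struct my_device {
		void *driver_state;
		struct drm_device drm;	/* no longer has to come first */
	};

	static void my_release(struct drm_device *drm)
	{
		struct my_device *my = container_of(drm, struct my_device, drm);

		drm_dev_fini(drm);	/* converse of drm_dev_init() */
		kfree(my);
	}

	static struct drm_driver my_driver = {
		/* ... */
		.release = my_release,
	};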
 
 /**
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index baa6ccb3e18b..c8baab9bee0d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -38,6 +38,8 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_displayid.h>
 
+#include "drm_crtc_internal.h"
+
 #define version_greater(edid, maj, min) \
 	(((edid)->version > (maj)) || \
 	 ((edid)->version == (maj) && (edid)->revision > (min)))
@@ -2153,7 +2155,7 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 /* fix up 1366x768 mode from 1368x768;
  * GTF/CVT can't express 1366 width, which isn't divisible by 8
  */
-static void fixup_mode_1366x768(struct drm_display_mode *mode)
+void drm_mode_fixup_1366x768(struct drm_display_mode *mode)
 {
 	if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
 		mode->hdisplay = 1366;
@@ -2177,7 +2179,7 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
 		if (!newmode)
 			return modes;
 
-		fixup_mode_1366x768(newmode);
+		drm_mode_fixup_1366x768(newmode);
 		if (!mode_in_range(newmode, edid, timing) ||
 		    !valid_inferred_mode(connector, newmode)) {
 			drm_mode_destroy(dev, newmode);
@@ -2206,7 +2208,7 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 		if (!newmode)
 			return modes;
 
-		fixup_mode_1366x768(newmode);
+		drm_mode_fixup_1366x768(newmode);
 		if (!mode_in_range(newmode, edid, timing) ||
 		    !valid_inferred_mode(connector, newmode)) {
 			drm_mode_destroy(dev, newmode);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 0ef8b284a4b8..596fabf18c3e 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -489,15 +489,14 @@ static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
  * drm_fbdev_cma_init_with_funcs() - Allocate and initialize a drm_fbdev_cma struct
  * @dev: DRM device
  * @preferred_bpp: Preferred bits per pixel for the device
- * @num_crtc: Number of CRTCs
  * @max_conn_count: Maximum number of connectors
  * @funcs: fb helper functions, in particular a custom dirty() callback
  *
  * Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
  */
 struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
-	unsigned int preferred_bpp, unsigned int num_crtc,
-	unsigned int max_conn_count, const struct drm_framebuffer_funcs *funcs)
+	unsigned int preferred_bpp, unsigned int max_conn_count,
+	const struct drm_framebuffer_funcs *funcs)
 {
 	struct drm_fbdev_cma *fbdev_cma;
 	struct drm_fb_helper *helper;
@@ -514,7 +513,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
 
 	drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
+	ret = drm_fb_helper_init(dev, helper, max_conn_count);
 	if (ret < 0) {
 		dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
 		goto err_free;
@@ -554,11 +553,11 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
  * Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
  */
 struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
-	unsigned int preferred_bpp, unsigned int num_crtc,
-	unsigned int max_conn_count)
+	unsigned int preferred_bpp, unsigned int max_conn_count)
 {
-	return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc,
-				max_conn_count, &drm_fb_cma_funcs);
+	return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp,
+					     max_conn_count,
+					     &drm_fb_cma_funcs);
 }
 EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
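
Callers simply drop the CRTC count, since drm_fb_helper_init() now sizes
crtc_info from dev->mode_config.num_crtc itself. A sketch of the matching
call-site update (the surrounding driver code is illustrative):

	-	fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc, 1);
	+	fbdev = drm_fbdev_cma_init(drm, 32, 1);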
 
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index c7fafa175755..f6d4d9700734 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -712,7 +712,6 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
  * drm_fb_helper_init - initialize a drm_fb_helper structure
  * @dev: drm device
  * @fb_helper: driver-allocated fbdev helper structure to initialize
- * @crtc_count: maximum number of crtcs to support in this fbdev emulation
  * @max_conn_count: max connector count
  *
  * This allocates the structures for the fbdev helper with the given limits.
@@ -727,9 +726,10 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
  */
 int drm_fb_helper_init(struct drm_device *dev,
 		       struct drm_fb_helper *fb_helper,
-		       int crtc_count, int max_conn_count)
+		       int max_conn_count)
 {
 	struct drm_crtc *crtc;
+	struct drm_mode_config *config = &dev->mode_config;
 	int i;
 
 	if (!drm_fbdev_emulation)
@@ -738,11 +738,11 @@ int drm_fb_helper_init(struct drm_device *dev,
 	if (!max_conn_count)
 		return -EINVAL;
 
-	fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+	fb_helper->crtc_info = kcalloc(config->num_crtc, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
 	if (!fb_helper->crtc_info)
 		return -ENOMEM;
 
-	fb_helper->crtc_count = crtc_count;
+	fb_helper->crtc_count = config->num_crtc;
 	fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
 	if (!fb_helper->connector_info) {
 		kfree(fb_helper->crtc_info);
@@ -751,7 +751,7 @@ int drm_fb_helper_init(struct drm_device *dev,
 	fb_helper->connector_info_alloc_count = dev->mode_config.num_connector;
 	fb_helper->connector_count = 0;
 
-	for (i = 0; i < crtc_count; i++) {
+	for (i = 0; i < fb_helper->crtc_count; i++) {
 		fb_helper->crtc_info[i].mode_set.connectors =
 			kcalloc(max_conn_count,
 				sizeof(struct drm_connector *),
@@ -860,6 +860,9 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 	if (!drm_fbdev_emulation)
 		return;
 
+	cancel_work_sync(&fb_helper->resume_work);
+	cancel_work_sync(&fb_helper->dirty_work);
+
 	mutex_lock(&kernel_fb_helper_lock);
 	if (!list_empty(&fb_helper->kernel_fb_list)) {
 		list_del(&fb_helper->kernel_fb_list);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index e51876e588d6..8bfb0b327267 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -97,14 +97,6 @@
  * locking would be fully redundant.
  */
 
-static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-						u64 size,
-						u64 alignment,
-						unsigned long color,
-						u64 start,
-						u64 end,
-						enum drm_mm_search_flags flags);
-
 #ifdef CONFIG_DRM_DEBUG_MM
 #include <linux/stackdepot.h>
 
@@ -226,69 +218,151 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 			    &drm_mm_interval_tree_augment);
 }
 
-static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
-				 struct drm_mm_node *node,
-				 u64 size, u64 alignment,
-				 unsigned long color,
-				 u64 range_start, u64 range_end,
-				 enum drm_mm_allocator_flags flags)
+#define RB_INSERT(root, member, expr) do { \
+	struct rb_node **link = &root.rb_node, *rb = NULL; \
+	u64 x = expr(node); \
+	while (*link) { \
+		rb = *link; \
+		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
+			link = &rb->rb_left; \
+		else \
+			link = &rb->rb_right; \
+	} \
+	rb_link_node(&node->member, rb, link); \
+	rb_insert_color(&node->member, &root); \
+} while (0)
+
+#define HOLE_SIZE(NODE) ((NODE)->hole_size)
+#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
+
+static void add_hole(struct drm_mm_node *node)
 {
-	struct drm_mm *mm = hole_node->mm;
-	u64 hole_start = drm_mm_hole_node_start(hole_node);
-	u64 hole_end = drm_mm_hole_node_end(hole_node);
-	u64 adj_start = hole_start;
-	u64 adj_end = hole_end;
+	struct drm_mm *mm = node->mm;
 
-	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node) || node->allocated);
+	node->hole_size =
+		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
+	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 
-	if (mm->color_adjust)
-		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+	RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
+	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
 
-	adj_start = max(adj_start, range_start);
-	adj_end = min(adj_end, range_end);
+	list_add(&node->hole_stack, &mm->hole_stack);
+}
 
-	if (flags & DRM_MM_CREATE_TOP)
-		adj_start = adj_end - size;
+static void rm_hole(struct drm_mm_node *node)
+{
+	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 
-	if (alignment) {
-		u64 rem;
+	list_del(&node->hole_stack);
+	rb_erase(&node->rb_hole_size, &node->mm->holes_size);
+	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
+	node->hole_size = 0;
 
-		div64_u64_rem(adj_start, alignment, &rem);
-		if (rem) {
-			if (flags & DRM_MM_CREATE_TOP)
-				adj_start -= rem;
-			else
-				adj_start += alignment - rem;
+	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
+}
+
+static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
+{
+	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
+}
+
+static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
+{
+	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
+}
+
+static inline u64 rb_hole_size(struct rb_node *rb)
+{
+	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
+}
+
+static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
+{
+	struct rb_node *best = NULL;
+	struct rb_node **link = &mm->holes_size.rb_node;
+
+	while (*link) {
+		struct rb_node *rb = *link;
+
+		if (size <= rb_hole_size(rb)) {
+			link = &rb->rb_left;
+			best = rb;
+		} else {
+			link = &rb->rb_right;
 		}
 	}
 
-	if (adj_start == hole_start) {
-		hole_node->hole_follows = 0;
-		list_del(&hole_node->hole_stack);
+	return rb_hole_size_to_node(best);
+}
+
+static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
+{
+	struct drm_mm_node *node = NULL;
+	struct rb_node **link = &mm->holes_addr.rb_node;
+
+	while (*link) {
+		u64 hole_start;
+
+		node = rb_hole_addr_to_node(*link);
+		hole_start = __drm_mm_hole_node_start(node);
+
+		if (addr < hole_start)
+			link = &node->rb_hole_addr.rb_left;
+		else if (addr > hole_start + node->hole_size)
+			link = &node->rb_hole_addr.rb_right;
+		else
+			break;
 	}
 
-	node->start = adj_start;
-	node->size = size;
-	node->mm = mm;
-	node->color = color;
-	node->allocated = 1;
+	return node;
+}
 
-	list_add(&node->node_list, &hole_node->node_list);
+static struct drm_mm_node *
+first_hole(struct drm_mm *mm,
+	   u64 start, u64 end, u64 size,
+	   enum drm_mm_insert_mode mode)
+{
+	if (RB_EMPTY_ROOT(&mm->holes_size))
+		return NULL;
 
-	drm_mm_interval_tree_add_node(hole_node, node);
+	switch (mode) {
+	default:
+	case DRM_MM_INSERT_BEST:
+		return best_hole(mm, size);
 
-	DRM_MM_BUG_ON(node->start < range_start);
-	DRM_MM_BUG_ON(node->start < adj_start);
-	DRM_MM_BUG_ON(node->start + node->size > adj_end);
-	DRM_MM_BUG_ON(node->start + node->size > range_end);
+	case DRM_MM_INSERT_LOW:
+		return find_hole(mm, start);
 
-	node->hole_follows = 0;
-	if (__drm_mm_hole_node_start(node) < hole_end) {
-		list_add(&node->hole_stack, &mm->hole_stack);
-		node->hole_follows = 1;
+	case DRM_MM_INSERT_HIGH:
+		return find_hole(mm, end);
+
+	case DRM_MM_INSERT_EVICT:
+		return list_first_entry_or_null(&mm->hole_stack,
+						struct drm_mm_node,
+						hole_stack);
 	}
+}
 
-	save_stack(node);
+static struct drm_mm_node *
+next_hole(struct drm_mm *mm,
+	  struct drm_mm_node *node,
+	  enum drm_mm_insert_mode mode)
+{
+	switch (mode) {
+	default:
+	case DRM_MM_INSERT_BEST:
+		return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
+
+	case DRM_MM_INSERT_LOW:
+		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
+
+	case DRM_MM_INSERT_HIGH:
+		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
+
+	case DRM_MM_INSERT_EVICT:
+		node = list_next_entry(node, hole_stack);
+		return &node->hole_stack == &mm->hole_stack ? NULL : node;
+	}
 }
 
 /**
@@ -317,21 +391,12 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 		return -ENOSPC;
 
 	/* Find the relevant hole to add our node to */
-	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
-					       node->start, ~(u64)0);
-	if (hole) {
-		if (hole->start < end)
-			return -ENOSPC;
-	} else {
-		hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
-	}
-
-	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
-	if (!drm_mm_hole_follows(hole))
+	hole = find_hole(mm, node->start);
+	if (!hole)
 		return -ENOSPC;
 
 	adj_start = hole_start = __drm_mm_hole_node_start(hole);
-	adj_end = hole_end = __drm_mm_hole_node_end(hole);
+	adj_end = hole_end = hole_start + hole->hole_size;
 
 	if (mm->color_adjust)
 		mm->color_adjust(hole, node->color, &adj_start, &adj_end);
@@ -340,70 +405,130 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 		return -ENOSPC;
 
 	node->mm = mm;
-	node->allocated = 1;
 
 	list_add(&node->node_list, &hole->node_list);
-
 	drm_mm_interval_tree_add_node(hole, node);
+	node->allocated = true;
+	node->hole_size = 0;
 
-	if (node->start == hole_start) {
-		hole->hole_follows = 0;
-		list_del(&hole->hole_stack);
-	}
-
-	node->hole_follows = 0;
-	if (end != hole_end) {
-		list_add(&node->hole_stack, &mm->hole_stack);
-		node->hole_follows = 1;
-	}
+	rm_hole(hole);
+	if (node->start > hole_start)
+		add_hole(hole);
+	if (end < hole_end)
+		add_hole(node);
 
 	save_stack(node);
-
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
 /**
- * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
+ * drm_mm_insert_node_in_range - ranged search for space and insert @node
  * @mm: drm_mm to allocate from
  * @node: preallocate node to insert
  * @size: size of the allocation
  * @alignment: alignment of the allocation
  * @color: opaque tag value to use for this node
- * @start: start of the allowed range for this node
- * @end: end of the allowed range for this node
- * @sflags: flags to fine-tune the allocation search
- * @aflags: flags to fine-tune the allocation behavior
+ * @range_start: start of the allowed range for this node
+ * @range_end: end of the allowed range for this node
+ * @mode: fine-tune the allocation search and placement
  *
  * The preallocated @node must be cleared to 0.
  *
  * Returns:
  * 0 on success, -ENOSPC if there's no suitable hole.
  */
-int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
-					u64 size, u64 alignment,
-					unsigned long color,
-					u64 start, u64 end,
-					enum drm_mm_search_flags sflags,
-					enum drm_mm_allocator_flags aflags)
+int drm_mm_insert_node_in_range(struct drm_mm * const mm,
+				struct drm_mm_node * const node,
+				u64 size, u64 alignment,
+				unsigned long color,
+				u64 range_start, u64 range_end,
+				enum drm_mm_insert_mode mode)
 {
-	struct drm_mm_node *hole_node;
+	struct drm_mm_node *hole;
+	u64 remainder_mask;
 
-	if (WARN_ON(size == 0))
-		return -EINVAL;
+	DRM_MM_BUG_ON(range_start >= range_end);
 
-	hole_node = drm_mm_search_free_in_range_generic(mm,
-							size, alignment, color,
-							start, end, sflags);
-	if (!hole_node)
+	if (unlikely(size == 0 || range_end - range_start < size))
 		return -ENOSPC;
 
-	drm_mm_insert_helper(hole_node, node,
-			     size, alignment, color,
-			     start, end, aflags);
-	return 0;
+	if (alignment <= 1)
+		alignment = 0;
+
+	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
+	for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
+	     hole = next_hole(mm, hole, mode)) {
+		u64 hole_start = __drm_mm_hole_node_start(hole);
+		u64 hole_end = hole_start + hole->hole_size;
+		u64 adj_start, adj_end;
+		u64 col_start, col_end;
+
+		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
+			break;
+
+		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
+			break;
+
+		col_start = hole_start;
+		col_end = hole_end;
+		if (mm->color_adjust)
+			mm->color_adjust(hole, color, &col_start, &col_end);
+
+		adj_start = max(col_start, range_start);
+		adj_end = min(col_end, range_end);
+
+		if (adj_end <= adj_start || adj_end - adj_start < size)
+			continue;
+
+		if (mode == DRM_MM_INSERT_HIGH)
+			adj_start = adj_end - size;
+
+		if (alignment) {
+			u64 rem;
+
+			if (likely(remainder_mask))
+				rem = adj_start & remainder_mask;
+			else
+				div64_u64_rem(adj_start, alignment, &rem);
+			if (rem) {
+				adj_start -= rem;
+				if (mode != DRM_MM_INSERT_HIGH)
+					adj_start += alignment;
+
+				if (adj_start < max(col_start, range_start) ||
+				    min(col_end, range_end) - adj_start < size)
+					continue;
+
+				if (adj_end <= adj_start ||
+				    adj_end - adj_start < size)
+					continue;
+			}
+		}
+
+		node->mm = mm;
+		node->size = size;
+		node->start = adj_start;
+		node->color = color;
+		node->hole_size = 0;
+
+		list_add(&node->node_list, &hole->node_list);
+		drm_mm_interval_tree_add_node(hole, node);
+		node->allocated = true;
+
+		rm_hole(hole);
+		if (adj_start > hole_start)
+			add_hole(hole);
+		if (adj_start + size < hole_end)
+			add_hole(node);
+
+		save_stack(node);
+		return 0;
+	}
+
+	return -ENOSPC;
 }
-EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
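
The former search/allocator flag pairs collapse into a single enum
drm_mm_insert_mode, so a ranged allocation now reads, for example (the
node, sizes and range are illustrative):

	struct drm_mm_node node = {};
	int err;

	/* a 4 KiB node, 64 KiB aligned, placed as high as possible */
	err = drm_mm_insert_node_in_range(&mm, &node, SZ_4K, SZ_64K, 0,
					  0, SZ_1G, DRM_MM_INSERT_HIGH);
	if (err)	/* -ENOSPC when no suitable hole exists */
		return err;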
 
 /**
  * drm_mm_remove_node - Remove a memory node from the allocator.
@@ -421,92 +546,20 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 	DRM_MM_BUG_ON(!node->allocated);
 	DRM_MM_BUG_ON(node->scanned_block);
 
-	prev_node =
-	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
-
-	if (drm_mm_hole_follows(node)) {
-		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
-			      __drm_mm_hole_node_end(node));
-		list_del(&node->hole_stack);
-	} else {
-		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
-			      __drm_mm_hole_node_end(node));
-	}
+	prev_node = list_prev_entry(node, node_list);
 
-	if (!drm_mm_hole_follows(prev_node)) {
-		prev_node->hole_follows = 1;
-		list_add(&prev_node->hole_stack, &mm->hole_stack);
-	} else
-		list_move(&prev_node->hole_stack, &mm->hole_stack);
+	if (drm_mm_hole_follows(node))
+		rm_hole(node);
 
 	drm_mm_interval_tree_remove(node, &mm->interval_tree);
 	list_del(&node->node_list);
-	node->allocated = 0;
-}
-EXPORT_SYMBOL(drm_mm_remove_node);
-
-static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
-{
-	if (end - start < size)
-		return 0;
-
-	if (alignment) {
-		u64 rem;
-
-		div64_u64_rem(start, alignment, &rem);
-		if (rem)
-			start += alignment - rem;
-	}
+	node->allocated = false;
 
-	return end >= start + size;
-}
-
-static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-							u64 size,
-							u64 alignment,
-							unsigned long color,
-							u64 start,
-							u64 end,
-							enum drm_mm_search_flags flags)
-{
-	struct drm_mm_node *entry;
-	struct drm_mm_node *best;
-	u64 adj_start;
-	u64 adj_end;
-	u64 best_size;
-
-	DRM_MM_BUG_ON(mm->scan_active);
-
-	best = NULL;
-	best_size = ~0UL;
-
-	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
-			       flags & DRM_MM_SEARCH_BELOW) {
-		u64 hole_size = adj_end - adj_start;
-
-		if (mm->color_adjust) {
-			mm->color_adjust(entry, color, &adj_start, &adj_end);
-			if (adj_end <= adj_start)
-				continue;
-		}
-
-		adj_start = max(adj_start, start);
-		adj_end = min(adj_end, end);
-
-		if (!check_free_hole(adj_start, adj_end, size, alignment))
-			continue;
-
-		if (!(flags & DRM_MM_SEARCH_BEST))
-			return entry;
-
-		if (hole_size < best_size) {
-			best = entry;
-			best_size = hole_size;
-		}
-	}
-
-	return best;
+	if (drm_mm_hole_follows(prev_node))
+		rm_hole(prev_node);
+	add_hole(prev_node);
 }
+EXPORT_SYMBOL(drm_mm_remove_node);
 
 /**
  * drm_mm_replace_node - move an allocation from @old to @new
@@ -521,18 +574,23 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
 	DRM_MM_BUG_ON(!old->allocated);
 
+	*new = *old;
+
 	list_replace(&old->node_list, &new->node_list);
-	list_replace(&old->hole_stack, &new->hole_stack);
 	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
-	new->hole_follows = old->hole_follows;
-	new->mm = old->mm;
-	new->start = old->start;
-	new->size = old->size;
-	new->color = old->color;
-	new->__subtree_last = old->__subtree_last;
-
-	old->allocated = 0;
-	new->allocated = 1;
+
+	if (drm_mm_hole_follows(old)) {
+		list_replace(&old->hole_stack, &new->hole_stack);
+		rb_replace_node(&old->rb_hole_size,
+				&new->rb_hole_size,
+				&old->mm->holes_size);
+		rb_replace_node(&old->rb_hole_addr,
+				&new->rb_hole_addr,
+				&old->mm->holes_addr);
+	}
+
+	old->allocated = false;
+	new->allocated = true;
 }
 EXPORT_SYMBOL(drm_mm_replace_node);
 
@@ -577,7 +635,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
  * @color: opaque tag value to use for the allocation
  * @start: start of the allowed range for the allocation
  * @end: end of the allowed range for the allocation
- * @flags: flags to specify how the allocation will be performed afterwards
+ * @mode: fine-tune the allocation search and placement
  *
  * This simply sets up the scanning routines with the parameters for the desired
  * hole.
@@ -593,7 +651,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
 				 unsigned long color,
 				 u64 start,
 				 u64 end,
-				 unsigned int flags)
+				 enum drm_mm_insert_mode mode)
 {
 	DRM_MM_BUG_ON(start >= end);
 	DRM_MM_BUG_ON(!size || size > end - start);
@@ -608,7 +666,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
 	scan->alignment = alignment;
 	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
 	scan->size = size;
-	scan->flags = flags;
+	scan->mode = mode;
 
 	DRM_MM_BUG_ON(end <= start);
 	scan->range_start = start;
@@ -667,7 +725,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
 	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
 		return false;
 
-	if (scan->flags == DRM_MM_CREATE_TOP)
+	if (scan->mode == DRM_MM_INSERT_HIGH)
 		adj_start = adj_end - scan->size;
 
 	if (scan->alignment) {
@@ -679,7 +737,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
 			div64_u64_rem(adj_start, scan->alignment, &rem);
 		if (rem) {
 			adj_start -= rem;
-			if (scan->flags != DRM_MM_CREATE_TOP)
+			if (scan->mode != DRM_MM_INSERT_HIGH)
 				adj_start += scan->alignment;
 			if (adj_start < max(col_start, scan->range_start) ||
 			    min(col_end, scan->range_end) - adj_start < scan->size)
@@ -775,7 +833,7 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
 
 	hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
 	hole_start = __drm_mm_hole_node_start(hole);
-	hole_end = __drm_mm_hole_node_end(hole);
+	hole_end = hole_start + hole->hole_size;
 
 	DRM_MM_BUG_ON(hole_start > scan->hit_start);
 	DRM_MM_BUG_ON(hole_end < scan->hit_end);
@@ -802,21 +860,22 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
 {
 	DRM_MM_BUG_ON(start + size <= start);
 
+	mm->color_adjust = NULL;
+
 	INIT_LIST_HEAD(&mm->hole_stack);
-	mm->scan_active = 0;
+	mm->interval_tree = RB_ROOT;
+	mm->holes_size = RB_ROOT;
+	mm->holes_addr = RB_ROOT;
 
 	/* Clever trick to avoid a special case in the free hole tracking. */
 	INIT_LIST_HEAD(&mm->head_node.node_list);
-	mm->head_node.allocated = 0;
-	mm->head_node.hole_follows = 1;
+	mm->head_node.allocated = false;
 	mm->head_node.mm = mm;
 	mm->head_node.start = start + size;
-	mm->head_node.size = start - mm->head_node.start;
-	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+	mm->head_node.size = -size;
+	add_hole(&mm->head_node);
 
-	mm->interval_tree = RB_ROOT;
-
-	mm->color_adjust = NULL;
+	mm->scan_active = 0;
 }
 EXPORT_SYMBOL(drm_mm_init);
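
The sentinel trick relies on u64 wraparound: with head_node.start set to
start + size and head_node.size to -size, the hole following the sentinel
spans exactly the managed range:

	/*
	 * hole_start = head_node.start + head_node.size
	 *            = (start + size) + (-size) = start      (mod 2^64)
	 * hole_end   = head_node.start = start + size
	 *
	 * so add_hole(&mm->head_node) seeds the hole trees with a single
	 * [start, start + size) hole and no special casing is needed.
	 */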
 
@@ -837,20 +896,17 @@ EXPORT_SYMBOL(drm_mm_takedown);
 
 static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
 {
-	u64 hole_start, hole_end, hole_size;
-
-	if (entry->hole_follows) {
-		hole_start = drm_mm_hole_node_start(entry);
-		hole_end = drm_mm_hole_node_end(entry);
-		hole_size = hole_end - hole_start;
-		drm_printf(p, "%#018llx-%#018llx: %llu: free\n", hole_start,
-			   hole_end, hole_size);
-		return hole_size;
+	u64 start, size;
+
+	size = entry->hole_size;
+	if (size) {
+		start = drm_mm_hole_node_start(entry);
+		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
+			   start, start + size, size);
 	}
 
-	return 0;
+	return size;
 }
-
 /**
  * drm_mm_print - print allocator state
  * @mm: drm_mm allocator to print
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index a8616b1a8d22..fd22c1c891bf 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1481,12 +1481,8 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
 
 	mode->type |= DRM_MODE_TYPE_USERDEF;
 	/* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */
-	if (cmd->xres == 1366 && mode->hdisplay == 1368) {
-		mode->hdisplay = 1366;
-		mode->hsync_start--;
-		mode->hsync_end--;
-		drm_mode_set_name(mode);
-	}
+	if (cmd->xres == 1366)
+		drm_mode_fixup_1366x768(mode);
 	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 	return mode;
 }
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 20cc33d1bfc1..d9100b565198 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -212,8 +212,7 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
 		goto out_unlock;
 	}
 
-	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
-				 pages, 0, DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, pages);
 	if (ret)
 		goto out_unlock;
 
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
index 1086e9876f91..4f76c992043f 100644
--- a/drivers/gpu/drm/etnaviv/Makefile
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -1,6 +1,7 @@
 etnaviv-y := \
 	etnaviv_buffer.o \
 	etnaviv_cmd_parser.o \
+	etnaviv_cmdbuf.o \
 	etnaviv_drv.o \
 	etnaviv_dump.o \
 	etnaviv_gem_prime.o \
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index d9230132dfbc..ed9588f36bc9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -15,6 +15,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_mmu.h"
@@ -125,7 +126,7 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
 	u32 *ptr = buf->vaddr + off;
 
 	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
-			ptr, etnaviv_iommu_get_cmdbuf_va(gpu, buf) + off, size - len * 4 - off);
+			ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);
 
 	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
 			ptr, len * 4, 0);
@@ -158,7 +159,7 @@ static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
 	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
 		buffer->user_size = 0;
 
-	return etnaviv_iommu_get_cmdbuf_va(gpu, buffer) + buffer->user_size;
+	return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size;
 }
 
 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
@@ -169,7 +170,7 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
 	buffer->user_size = 0;
 
 	CMD_WAIT(buffer);
-	CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+	CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
 		 buffer->user_size - 4);
 
 	return buffer->user_size / 8;
@@ -261,7 +262,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	if (drm_debug & DRM_UT_DRIVER)
 		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
 
-	link_target = etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf);
+	link_target = etnaviv_cmdbuf_get_va(cmdbuf);
 	link_dwords = cmdbuf->size / 8;
 
 	/*
@@ -355,12 +356,13 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
 		       VIVS_GL_EVENT_FROM_PE);
 	CMD_WAIT(buffer);
-	CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+	CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
 			    buffer->user_size - 4);
 
 	if (drm_debug & DRM_UT_DRIVER)
 		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
-			return_target, etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf), cmdbuf->vaddr);
+			return_target, etnaviv_cmdbuf_get_va(cmdbuf),
+			cmdbuf->vaddr);
 
 	if (drm_debug & DRM_UT_DRIVER) {
 		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
index 2a2e5e366ab7..6e3bbcf24160 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
@@ -56,6 +56,8 @@ static const struct {
 	ST(0x0644, 1),
 	ST(0x064c, 1),
 	ST(0x0680, 8),
+	ST(0x086c, 1),
+	ST(0x1028, 1),
 	ST(0x1410, 1),
 	ST(0x1430, 1),
 	ST(0x1458, 1),
@@ -73,8 +75,12 @@ static const struct {
 	ST(0x16c0, 8),
 	ST(0x16e0, 8),
 	ST(0x1740, 8),
+	ST(0x17c0, 8),
+	ST(0x17e0, 8),
 	ST(0x2400, 14 * 16),
 	ST(0x10800, 32 * 16),
+	ST(0x14600, 16),
+	ST(0x14800, 8 * 8),
 #undef ST
 };
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
new file mode 100644
index 000000000000..633e0f07cbac
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/drm_mm.h>
+
+#include "etnaviv_cmdbuf.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+
+#define SUBALLOC_SIZE		SZ_256K
+#define SUBALLOC_GRANULE	SZ_4K
+#define SUBALLOC_GRANULES	(SUBALLOC_SIZE / SUBALLOC_GRANULE)
+
+struct etnaviv_cmdbuf_suballoc {
+	/* suballocated dma buffer properties */
+	struct etnaviv_gpu *gpu;
+	void *vaddr;
+	dma_addr_t paddr;
+
+	/* GPU mapping */
+	u32 iova;
+	struct drm_mm_node vram_node; /* only used on MMUv2 */
+
+	/* allocation management */
+	struct mutex lock;
+	DECLARE_BITMAP(granule_map, SUBALLOC_GRANULES);
+	int free_space;
+	wait_queue_head_t free_event;
+};
+
+struct etnaviv_cmdbuf_suballoc *
+etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu *gpu)
+{
+	struct etnaviv_cmdbuf_suballoc *suballoc;
+	int ret;
+
+	suballoc = kzalloc(sizeof(*suballoc), GFP_KERNEL);
+	if (!suballoc)
+		return ERR_PTR(-ENOMEM);
+
+	suballoc->gpu = gpu;
+	mutex_init(&suballoc->lock);
+	init_waitqueue_head(&suballoc->free_event);
+
+	suballoc->vaddr = dma_alloc_wc(gpu->dev, SUBALLOC_SIZE,
+				       &suballoc->paddr, GFP_KERNEL);
+	if (!suballoc->vaddr)
+		goto free_suballoc;
+
+	ret = etnaviv_iommu_get_suballoc_va(gpu, suballoc->paddr,
+					    &suballoc->vram_node, SUBALLOC_SIZE,
+					    &suballoc->iova);
+	if (ret)
+		goto free_dma;
+
+	return suballoc;
+
+free_dma:
+	dma_free_wc(gpu->dev, SUBALLOC_SIZE, suballoc->vaddr, suballoc->paddr);
+free_suballoc:
+	kfree(suballoc);
+
+	return NULL;
+}
+
+void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
+{
+	etnaviv_iommu_put_suballoc_va(suballoc->gpu, &suballoc->vram_node,
+				      SUBALLOC_SIZE, suballoc->iova);
+	dma_free_wc(suballoc->gpu->dev, SUBALLOC_SIZE, suballoc->vaddr,
+		    suballoc->paddr);
+	kfree(suballoc);
+}
+
+struct etnaviv_cmdbuf *
+etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
+		   size_t nr_bos)
+{
+	struct etnaviv_cmdbuf *cmdbuf;
+	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
+				 sizeof(*cmdbuf));
+	int granule_offs, order, ret;
+
+	cmdbuf = kzalloc(sz, GFP_KERNEL);
+	if (!cmdbuf)
+		return NULL;
+
+	cmdbuf->suballoc = suballoc;
+	cmdbuf->size = size;
+
+	order = order_base_2(ALIGN(size, SUBALLOC_GRANULE) / SUBALLOC_GRANULE);
+retry:
+	mutex_lock(&suballoc->lock);
+	granule_offs = bitmap_find_free_region(suballoc->granule_map,
+					SUBALLOC_GRANULES, order);
+	if (granule_offs < 0) {
+		suballoc->free_space = 0;
+		mutex_unlock(&suballoc->lock);
+		ret = wait_event_interruptible_timeout(suballoc->free_event,
+						       suballoc->free_space,
+						       msecs_to_jiffies(10 * 1000));
+		if (!ret) {
+			dev_err(suballoc->gpu->dev,
+				"Timeout waiting for cmdbuf space\n");
+			kfree(cmdbuf);
+			return NULL;
+		}
+		goto retry;
+	}
+	mutex_unlock(&suballoc->lock);
+	cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE;
+	cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
+
+	return cmdbuf;
+}
+
+void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
+{
+	struct etnaviv_cmdbuf_suballoc *suballoc = cmdbuf->suballoc;
+	int order = order_base_2(ALIGN(cmdbuf->size, SUBALLOC_GRANULE) /
+				 SUBALLOC_GRANULE);
+
+	mutex_lock(&suballoc->lock);
+	bitmap_release_region(suballoc->granule_map,
+			      cmdbuf->suballoc_offset / SUBALLOC_GRANULE,
+			      order);
+	suballoc->free_space = 1;
+	mutex_unlock(&suballoc->lock);
+	wake_up_all(&suballoc->free_event);
+	kfree(cmdbuf);
+}
+
+u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf)
+{
+	return buf->suballoc->iova + buf->suballoc_offset;
+}
+
+dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf)
+{
+	return buf->suballoc->paddr + buf->suballoc_offset;
+}
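[Note: the following usage sketch is illustrative only and not part of the patch. It shows the intended lifecycle of the new suballocator API; the gpu and prefetch variables are assumed to be in scope, and error handling is abbreviated.]

	struct etnaviv_cmdbuf_suballoc *suballoc;
	struct etnaviv_cmdbuf *cmdbuf;

	/* one suballocator per GPU, backed by a single 256K DMA buffer */
	suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
	if (IS_ERR(suballoc))
		return PTR_ERR(suballoc);

	/* sizes round up to 4K granules; SZ_8K occupies two granules */
	cmdbuf = etnaviv_cmdbuf_new(suballoc, SZ_8K, 0);
	if (!cmdbuf)
		return -ENOMEM;

	/* the GPU address is the shared IOVA plus the granule offset */
	etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(cmdbuf), prefetch);

	etnaviv_cmdbuf_free(cmdbuf);	/* returns granules, wakes waiters */
	etnaviv_cmdbuf_suballoc_destroy(suballoc);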
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
new file mode 100644
index 000000000000..80d78076c679
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_CMDBUF_H__
+#define __ETNAVIV_CMDBUF_H__
+
+#include <linux/types.h>
+
+struct etnaviv_gpu;
+struct etnaviv_cmdbuf_suballoc;
+
+struct etnaviv_cmdbuf {
+	/* suballocator this cmdbuf is allocated from */
+	struct etnaviv_cmdbuf_suballoc *suballoc;
+	/* user context key, must be unique between all active users */
+	struct etnaviv_file_private *ctx;
+	/* cmdbuf properties */
+	int suballoc_offset;
+	void *vaddr;
+	u32 size;
+	u32 user_size;
+	/* fence after which this buffer is to be disposed */
+	struct dma_fence *fence;
+	/* target exec state */
+	u32 exec_state;
+	/* per GPU in-flight list */
+	struct list_head node;
+	/* BOs attached to this command buffer */
+	unsigned int nr_bos;
+	struct etnaviv_vram_mapping *bo_map[0];
+};
+
+struct etnaviv_cmdbuf_suballoc *
+etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu *gpu);
+void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
+
+struct etnaviv_cmdbuf *
+etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
+		   size_t nr_bos);
+void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
+
+u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
+dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf);
+
+#endif /* __ETNAVIV_CMDBUF_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 590be0d1dd95..587e45043542 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -18,11 +18,11 @@
 #include <linux/of_platform.h>
 #include <drm/drm_of.h>
 
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_mmu.h"
-#include "etnaviv_gem.h"
 
 #ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
 static bool reglog;
@@ -177,7 +177,8 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
 	u32 i;
 
 	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
-			buf->vaddr, (u64)buf->paddr, size - buf->user_size);
+			buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
+			size - buf->user_size);
 
 	for (i = 0; i < size / 4; i++) {
 		if (i && !(i % 4))
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index af65491a78e2..d019b5e311cc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/devcoredump.h>
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_dump.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gpu.h"
@@ -177,12 +178,11 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 	etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
 			      gpu->buffer->size,
-			      etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer));
+			      etnaviv_cmdbuf_get_va(gpu->buffer));
 
 	list_for_each_entry(cmd, &gpu->active_cmd_list, node)
 		etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
-				      cmd->size,
-				      etnaviv_iommu_get_cmdbuf_va(gpu, cmd));
+				      cmd->size, etnaviv_cmdbuf_get_va(cmd));
 
 	/* Reserve space for the bomap */
 	if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index afdd55ddf821..726090d7a6ac 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/reservation.h>
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gem.h"
@@ -332,8 +333,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 	bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
 	relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
 	stream = drm_malloc_ab(1, args->stream_size);
-	cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8,
-					args->nr_bos);
+	cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
+				    ALIGN(args->stream_size, 8) + 8,
+				    args->nr_bos);
 	if (!bos || !relocs || !stream || !cmdbuf) {
 		ret = -ENOMEM;
 		goto err_submit_cmds;
@@ -422,7 +424,7 @@ err_submit_objects:
 err_submit_cmds:
 	/* if we still own the cmdbuf */
 	if (cmdbuf)
-		etnaviv_gpu_cmdbuf_free(cmdbuf);
+		etnaviv_cmdbuf_free(cmdbuf);
 	if (stream)
 		drm_free_large(stream);
 	if (bos)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 0a67124bb2a4..130d7d517a19 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -18,6 +18,8 @@
 #include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
+
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_dump.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gem.h"
@@ -546,6 +548,37 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
 		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
 }
 
+static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
+{
+	/*
+	 * Base value for VIVS_PM_PULSE_EATER register on models where it
+	 * cannot be read, extracted from vivante kernel driver.
+	 */
+	u32 pulse_eater = 0x01590880;
+
+	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
+	    etnaviv_is_model_rev(gpu, GC4000, 0x5222))
+		pulse_eater |= BIT(23);
+
+	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
+	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
+		pulse_eater &= ~BIT(16);
+		pulse_eater |= BIT(17);
+	}
+
+	if ((gpu->identity.revision > 0x5420) &&
+	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
+		/* Performance fix: disable internal DFS */
+		pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
+		pulse_eater |= BIT(18);
+	}
+
+	gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
+}
+
 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 {
 	u16 prefetch;
@@ -586,6 +619,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
 	}
 
+	/* setup the pulse eater */
+	etnaviv_gpu_setup_pulse_eater(gpu);
+
 	/* setup the MMU */
 	etnaviv_iommu_restore(gpu);
 
@@ -593,7 +629,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 	prefetch = etnaviv_buffer_init(gpu);
 
 	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
-	etnaviv_gpu_start_fe(gpu, etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer),
+	etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer),
 			     prefetch);
 }
 
@@ -658,8 +694,15 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 		goto fail;
 	}
 
+	gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
+	if (IS_ERR(gpu->cmdbuf_suballoc)) {
+		dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
+		ret = PTR_ERR(gpu->cmdbuf_suballoc);
+		goto fail;
+	}
+
 	/* Create buffer: */
-	gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
+	gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
 	if (!gpu->buffer) {
 		ret = -ENOMEM;
 		dev_err(gpu->dev, "could not create command buffer\n");
@@ -667,7 +710,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	}
 
 	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
-	    gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
+	    etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) {
 		ret = -EINVAL;
 		dev_err(gpu->dev,
 			"command buffer outside valid memory window\n");
@@ -694,7 +737,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	return 0;
 
 free_buffer:
-	etnaviv_gpu_cmdbuf_free(gpu->buffer);
+	etnaviv_cmdbuf_free(gpu->buffer);
 	gpu->buffer = NULL;
 destroy_iommu:
 	etnaviv_iommu_destroy(gpu->mmu);
@@ -1117,41 +1160,6 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
  * Cmdstream submission/retirement:
  */
 
-struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
-	size_t nr_bos)
-{
-	struct etnaviv_cmdbuf *cmdbuf;
-	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
-				 sizeof(*cmdbuf));
-
-	cmdbuf = kzalloc(sz, GFP_KERNEL);
-	if (!cmdbuf)
-		return NULL;
-
-	if (gpu->mmu->version == ETNAVIV_IOMMU_V2)
-		size = ALIGN(size, SZ_4K);
-
-	cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
-				     GFP_KERNEL);
-	if (!cmdbuf->vaddr) {
-		kfree(cmdbuf);
-		return NULL;
-	}
-
-	cmdbuf->gpu = gpu;
-	cmdbuf->size = size;
-
-	return cmdbuf;
-}
-
-void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
-{
-	etnaviv_iommu_put_cmdbuf_va(cmdbuf->gpu, cmdbuf);
-	dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
-		    cmdbuf->paddr);
-	kfree(cmdbuf);
-}
-
 static void retire_worker(struct work_struct *work)
 {
 	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
@@ -1177,7 +1185,7 @@ static void retire_worker(struct work_struct *work)
 			etnaviv_gem_mapping_unreference(mapping);
 		}
 
-		etnaviv_gpu_cmdbuf_free(cmdbuf);
+		etnaviv_cmdbuf_free(cmdbuf);
 		/*
 		 * We need to balance the runtime PM count caused by
 		 * each submission.  Upon submission, we increment
@@ -1593,10 +1601,15 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
 #endif
 
 	if (gpu->buffer) {
-		etnaviv_gpu_cmdbuf_free(gpu->buffer);
+		etnaviv_cmdbuf_free(gpu->buffer);
 		gpu->buffer = NULL;
 	}
 
+	if (gpu->cmdbuf_suballoc) {
+		etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
+		gpu->cmdbuf_suballoc = NULL;
+	}
+
 	if (gpu->mmu) {
 		etnaviv_iommu_destroy(gpu->mmu);
 		gpu->mmu = NULL;
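[Note: the IOMMUv1 sanity check above now validates the suballocated IOVA rather than the raw physical address. On MMUv1 the front end only has a 2G linear window above memory_base; a worked example of the failing case, with addresses invented for illustration:]

	/*
	 * memory_base = 0x40000000, buffer paddr = 0xd0000000
	 * MMUv1 IOVA  = paddr - memory_base = 0x90000000
	 * 0x90000000 > 0x80000000, so the buffer lies outside the
	 * linear window and etnaviv_gpu_init() fails with -EINVAL.
	 */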
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 8c6b824e9d0a..1c0606ea7d5e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -92,6 +92,7 @@ struct etnaviv_event {
 	struct dma_fence *fence;
 };
 
+struct etnaviv_cmdbuf_suballoc;
 struct etnaviv_cmdbuf;
 
 struct etnaviv_gpu {
@@ -135,6 +136,7 @@ struct etnaviv_gpu {
 	int irq;
 
 	struct etnaviv_iommu *mmu;
+	struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
 
 	/* Power Control: */
 	struct clk *clk_bus;
@@ -150,29 +152,6 @@ struct etnaviv_gpu {
 	struct work_struct recover_work;
 };
 
-struct etnaviv_cmdbuf {
-	/* device this cmdbuf is allocated for */
-	struct etnaviv_gpu *gpu;
-	/* user context key, must be unique between all active users */
-	struct etnaviv_file_private *ctx;
-	/* cmdbuf properties */
-	void *vaddr;
-	dma_addr_t paddr;
-	u32 size;
-	u32 user_size;
-	/* vram node used if the cmdbuf is mapped through the MMUv2 */
-	struct drm_mm_node vram_node;
-	/* fence after which this buffer is to be disposed */
-	struct dma_fence *fence;
-	/* target exec state */
-	u32 exec_state;
-	/* per GPU in-flight list */
-	struct list_head node;
-	/* BOs attached to this command buffer */
-	unsigned int nr_bos;
-	struct etnaviv_vram_mapping *bo_map[0];
-};
-
 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
 {
 	etnaviv_writel(data, gpu->mmio + reg);
@@ -211,9 +190,6 @@ int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
 	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
 int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
-struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
-					      u32 size, size_t nr_bos);
-void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
 void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
 int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 81f1583a7946..7a7c97f599d7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -184,7 +184,7 @@ static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
 	memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
 }
 
-static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
+static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
 	.ops = {
 		.domain_free = etnaviv_domain_free,
 		.map = etnaviv_iommuv1_map,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 7e9c4d210a84..cbe447ac5974 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -21,6 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/bitops.h>
 
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_mmu.h"
 #include "etnaviv_iommu.h"
@@ -229,7 +230,7 @@ static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
 			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
 }
 
-static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
+static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
 	.ops = {
 		.domain_free = etnaviv_iommuv2_domain_free,
 		.map = etnaviv_iommuv2_map,
@@ -254,7 +255,8 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
 	prefetch = etnaviv_buffer_config_mmuv2(gpu,
 				(u32)etnaviv_domain->mtlb_dma,
 				(u32)etnaviv_domain->bad_page_dma);
-	etnaviv_gpu_start_fe(gpu, gpu->buffer->paddr, prefetch);
+	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
+			     prefetch);
 	etnaviv_gpu_wait_idle(gpu, 100);
 
 	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index f503af462dad..f103e787de94 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -15,6 +15,7 @@
  */
 
 #include "common.xml.h"
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gpu.h"
@@ -107,6 +108,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 				   struct drm_mm_node *node, size_t size)
 {
 	struct etnaviv_vram_mapping *free = NULL;
+	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
 	int ret;
 
 	lockdep_assert_held(&mmu->lock);
@@ -117,15 +119,10 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		struct list_head list;
 		bool found;
 
-		/*
-		 * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
-		 * drm_mm into giving out a low IOVA after address space
-		 * rollover. This needs a proper fix.
-		 */
 		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
-			size, 0, mmu->last_iova, ~0UL,
-			mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
-
+						  size, 0, 0,
+						  mmu->last_iova, U64_MAX,
+						  mode);
 		if (ret != -ENOSPC)
 			break;
 
@@ -140,7 +137,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		}
 
 		/* Try to retire some entries */
-		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, 0);
+		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
 
 		found = 0;
 		INIT_LIST_HEAD(&list);
@@ -192,13 +189,12 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 			list_del_init(&m->scan_node);
 		}
 
+		mode = DRM_MM_INSERT_EVICT;
+
 		/*
 		 * We removed enough mappings so that the new allocation will
-		 * succeed.  Ensure that the MMU will be flushed before the
-		 * associated commit requesting this mapping, and retry the
-		 * allocation one more time.
+		 * succeed. Retry the allocation one more time.
 		 */
-		mmu->need_flush = true;
 	}
 
 	return ret;
@@ -250,6 +246,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
 	}
 
 	list_add_tail(&mapping->mmu_node, &mmu->mappings);
+	mmu->need_flush = true;
 	mutex_unlock(&mmu->lock);
 
 	return ret;
@@ -267,6 +264,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
 		etnaviv_iommu_remove_mapping(mmu, mapping);
 
 	list_del(&mapping->mmu_node);
+	mmu->need_flush = true;
 	mutex_unlock(&mmu->lock);
 }
 
@@ -322,55 +320,50 @@ void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
 		etnaviv_iommuv2_restore(gpu);
 }
 
-u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
-				struct etnaviv_cmdbuf *buf)
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
+				  struct drm_mm_node *vram_node, size_t size,
+				  u32 *iova)
 {
 	struct etnaviv_iommu *mmu = gpu->mmu;
 
 	if (mmu->version == ETNAVIV_IOMMU_V1) {
-		return buf->paddr - gpu->memory_base;
+		*iova = paddr - gpu->memory_base;
+		return 0;
 	} else {
 		int ret;
 
-		if (buf->vram_node.allocated)
-			return (u32)buf->vram_node.start;
-
 		mutex_lock(&mmu->lock);
-		ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
-					      buf->size + SZ_64K);
+		ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
 		if (ret < 0) {
 			mutex_unlock(&mmu->lock);
-			return 0;
+			return ret;
 		}
-		ret = iommu_map(mmu->domain, buf->vram_node.start, buf->paddr,
-				buf->size, IOMMU_READ);
+		ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
+				IOMMU_READ);
 		if (ret < 0) {
-			drm_mm_remove_node(&buf->vram_node);
+			drm_mm_remove_node(vram_node);
 			mutex_unlock(&mmu->lock);
-			return 0;
+			return ret;
 		}
-		/*
-		 * At least on GC3000 the FE MMU doesn't properly flush old TLB
-		 * entries. Make sure to space the command buffers out in a way
-		 * that the FE MMU prefetch won't load invalid entries.
-		 */
-		mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
+		mmu->last_iova = vram_node->start + size;
 		gpu->mmu->need_flush = true;
 		mutex_unlock(&mmu->lock);
 
-		return (u32)buf->vram_node.start;
+		*iova = (u32)vram_node->start;
+		return 0;
 	}
 }
 
-void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
-				 struct etnaviv_cmdbuf *buf)
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
+				   struct drm_mm_node *vram_node, size_t size,
+				   u32 iova)
 {
 	struct etnaviv_iommu *mmu = gpu->mmu;
 
-	if (mmu->version == ETNAVIV_IOMMU_V2 && buf->vram_node.allocated) {
+	if (mmu->version == ETNAVIV_IOMMU_V2) {
 		mutex_lock(&mmu->lock);
-		iommu_unmap(mmu->domain, buf->vram_node.start, buf->size);
-		drm_mm_remove_node(&buf->vram_node);
+		iommu_unmap(mmu->domain, iova, size);
+		drm_mm_remove_node(vram_node);
 		mutex_unlock(&mmu->lock);
 	}
 }
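[Note: etnaviv_iommu_find_iova() above first asks for the lowest free IOVA with DRM_MM_INSERT_LOW and, once an eviction scan has freed space, retries with DRM_MM_INSERT_EVICT so the node lands in the hole the scan produced. A sketch of the new unified call that replaces the old SEARCH/CREATE flag pairs; the placement summaries paraphrase the drm_mm rework in this series:]

	/*
	 * DRM_MM_INSERT_BEST  - smallest hole that fits (best-fit)
	 * DRM_MM_INSERT_LOW   - lowest suitable address (bottom-up)
	 * DRM_MM_INSERT_HIGH  - highest suitable address (top-down)
	 * DRM_MM_INSERT_EVICT - hole freed by a preceding eviction scan
	 */
	ret = drm_mm_insert_node_in_range(&mmu->mm, node, size,
					  0 /* align */, 0 /* color */,
					  mmu->last_iova, U64_MAX,
					  DRM_MM_INSERT_LOW);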
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index e787e49c9693..54be289e5981 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -62,10 +62,12 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
 	struct etnaviv_vram_mapping *mapping);
 void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
 
-u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
-				struct etnaviv_cmdbuf *buf);
-void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
-				 struct etnaviv_cmdbuf *buf);
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
+				  struct drm_mm_node *vram_node, size_t size,
+				  u32 *iova);
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
+				   struct drm_mm_node *vram_node, size_t size,
+				   u32 iova);
 
 size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
 void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index d69af00bdd6a..0fd6f7a18364 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -13,9 +13,11 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/mfd/syscon.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/pm_runtime.h>
+#include <linux/regmap.h>
 
 #include <video/exynos5433_decon.h>
 
@@ -25,6 +27,9 @@
 #include "exynos_drm_plane.h"
 #include "exynos_drm_iommu.h"
 
+#define DSD_CFG_MUX 0x1004
+#define DSD_CFG_MUX_TE_UNMASK_GLOBAL BIT(13)
+
 #define WINDOWS_NR	3
 #define MIN_FB_WIDTH_FOR_16WORD_BURST	128
 
@@ -57,6 +62,7 @@ struct decon_context {
 	struct exynos_drm_plane		planes[WINDOWS_NR];
 	struct exynos_drm_plane_config	configs[WINDOWS_NR];
 	void __iomem			*addr;
+	struct regmap			*sysreg;
 	struct clk			*clks[ARRAY_SIZE(decon_clks_name)];
 	int				pipe;
 	unsigned long			flags;
@@ -118,18 +124,29 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
 
 static void decon_setup_trigger(struct decon_context *ctx)
 {
-	u32 val = !(ctx->out_type & I80_HW_TRG)
-		? TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
-		  TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
-		: TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
-		  TRIGCON_HWTRIGMASK | TRIGCON_HWTRIGEN;
-	writel(val, ctx->addr + DECON_TRIGCON);
+	if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
+		return;
+
+	if (!(ctx->out_type & I80_HW_TRG)) {
+		writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F
+		       | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
+		       ctx->addr + DECON_TRIGCON);
+		return;
+	}
+
+	writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | TRIGCON_HWTRIGMASK
+	       | TRIGCON_HWTRIGEN, ctx->addr + DECON_TRIGCON);
+
+	if (regmap_update_bits(ctx->sysreg, DSD_CFG_MUX,
+			       DSD_CFG_MUX_TE_UNMASK_GLOBAL, ~0))
+		DRM_ERROR("Cannot update sysreg.\n");
 }
 
 static void decon_commit(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
 	struct drm_display_mode *m = &crtc->base.mode;
+	bool interlaced = false;
 	u32 val;
 
 	if (test_bit(BIT_SUSPENDED, &ctx->flags))
@@ -140,13 +157,16 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
 		m->crtc_hsync_end = m->crtc_htotal - 92;
 		m->crtc_vsync_start = m->crtc_vdisplay + 1;
 		m->crtc_vsync_end = m->crtc_vsync_start + 1;
+		if (m->flags & DRM_MODE_FLAG_INTERLACE)
+			interlaced = true;
 	}
 
-	if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
-		decon_setup_trigger(ctx);
+	decon_setup_trigger(ctx);
 
 	/* lcd on and use command if */
 	val = VIDOUT_LCD_ON;
+	if (interlaced)
+		val |= VIDOUT_INTERLACE_EN_F;
 	if (ctx->out_type & IFTYPE_I80) {
 		val |= VIDOUT_COMMAND_IF;
 	} else {
@@ -155,15 +175,21 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
 
 	writel(val, ctx->addr + DECON_VIDOUTCON0);
 
-	val = VIDTCON2_LINEVAL(m->vdisplay - 1) |
-		VIDTCON2_HOZVAL(m->hdisplay - 1);
+	if (interlaced)
+		val = VIDTCON2_LINEVAL(m->vdisplay / 2 - 1) |
+			VIDTCON2_HOZVAL(m->hdisplay - 1);
+	else
+		val = VIDTCON2_LINEVAL(m->vdisplay - 1) |
+			VIDTCON2_HOZVAL(m->hdisplay - 1);
 	writel(val, ctx->addr + DECON_VIDTCON2);
 
 	if (!(ctx->out_type & IFTYPE_I80)) {
-		val = VIDTCON00_VBPD_F(
-				m->crtc_vtotal - m->crtc_vsync_end - 1) |
-			VIDTCON00_VFPD_F(
-				m->crtc_vsync_start - m->crtc_vdisplay - 1);
+		int vbp = m->crtc_vtotal - m->crtc_vsync_end;
+		int vfp = m->crtc_vsync_start - m->crtc_vdisplay;
+
+		if (interlaced)
+			vbp = vbp / 2 - 1;
+		val = VIDTCON00_VBPD_F(vbp - 1) | VIDTCON00_VFPD_F(vfp - 1);
 		writel(val, ctx->addr + DECON_VIDTCON00);
 
 		val = VIDTCON01_VSPW_F(
@@ -278,12 +304,22 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
 	if (test_bit(BIT_SUSPENDED, &ctx->flags))
 		return;
 
-	val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y);
-	writel(val, ctx->addr + DECON_VIDOSDxA(win));
+	if (crtc->base.mode.flags & DRM_MODE_FLAG_INTERLACE) {
+		val = COORDINATE_X(state->crtc.x) |
+			COORDINATE_Y(state->crtc.y / 2);
+		writel(val, ctx->addr + DECON_VIDOSDxA(win));
+
+		val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
+			COORDINATE_Y((state->crtc.y + state->crtc.h) / 2 - 1);
+		writel(val, ctx->addr + DECON_VIDOSDxB(win));
+	} else {
+		val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y);
+		writel(val, ctx->addr + DECON_VIDOSDxA(win));
 
-	val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
-		COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
-	writel(val, ctx->addr + DECON_VIDOSDxB(win));
+		val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
+				COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
+		writel(val, ctx->addr + DECON_VIDOSDxB(win));
+	}
 
 	val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
 		VIDOSD_Wx_ALPHA_B_F(0x0);
@@ -355,8 +391,6 @@ static void decon_swreset(struct decon_context *ctx)
 		udelay(10);
 	}
 
-	WARN(tries == 0, "failed to disable DECON\n");
-
 	writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0);
 	for (tries = 2000; tries; --tries) {
 		if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_SWRESET)
@@ -557,6 +591,13 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
 
 	if (val) {
 		writel(val, ctx->addr + DECON_VIDINTCON1);
+		if (ctx->out_type & IFTYPE_HDMI) {
+			val = readl(ctx->addr + DECON_VIDOUTCON0);
+			val &= VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F;
+			if (val ==
+			    (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F))
+				return IRQ_HANDLED;
+		}
 		drm_crtc_handle_vblank(&ctx->crtc->base);
 	}
 
@@ -637,6 +678,15 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
 		ctx->out_type |= IFTYPE_I80;
 	}
 
+	if (ctx->out_type & I80_HW_TRG) {
+		ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+							"samsung,disp-sysreg");
+		if (IS_ERR(ctx->sysreg)) {
+			dev_err(dev, "failed to get system register\n");
+			return PTR_ERR(ctx->sysreg);
+		}
+	}
+
 	for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
 		struct clk *clk;
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index a7884bea42eb..bcdb2720b68e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -208,7 +208,6 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
 	struct exynos_drm_fbdev *fbdev;
 	struct exynos_drm_private *private = dev->dev_private;
 	struct drm_fb_helper *helper;
-	unsigned int num_crtc;
 	int ret;
 
 	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
@@ -225,9 +224,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
 
-	num_crtc = dev->mode_config.num_crtc;
-
-	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
+	ret = drm_fb_helper_init(dev, helper, MAX_CONNECTOR);
 	if (ret < 0) {
 		DRM_ERROR("failed to initialize drm fb helper.\n");
 		goto err_init;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 745cfbdf6b39..a9fa444c6053 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -125,10 +125,8 @@ static struct fimd_driver_data exynos3_fimd_driver_data = {
 	.timing_base = 0x20000,
 	.lcdblk_offset = 0x210,
 	.lcdblk_bypass_shift = 1,
-	.trg_type = I80_HW_TRG,
 	.has_shadowcon = 1,
 	.has_vidoutcon = 1,
-	.has_trigger_per_te = 1,
 };
 
 static struct fimd_driver_data exynos4_fimd_driver_data = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 603d8425cca6..2b8bf2dd6387 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1683,7 +1683,7 @@ struct platform_driver g2d_driver = {
 	.probe		= g2d_probe,
 	.remove		= g2d_remove,
 	.driver		= {
-		.name	= "s5p-g2d",
+		.name	= "exynos-drm-g2d",
 		.owner	= THIS_MODULE,
 		.pm	= &g2d_pm_ops,
 		.of_match_table = exynos_g2d_match,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 5ed8b1effe71..88ccc0469316 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -35,6 +35,7 @@
 #include <linux/io.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_graph.h>
 #include <linux/hdmi.h>
 #include <linux/component.h>
 #include <linux/mfd/syscon.h>
@@ -133,6 +134,7 @@ struct hdmi_context {
 	struct regulator_bulk_data	regul_bulk[ARRAY_SIZE(supply)];
 	struct regulator		*reg_hdmi_en;
 	struct exynos_drm_clk		phy_clk;
+	struct drm_bridge		*bridge;
 };
 
 static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
@@ -509,9 +511,9 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = {
 	{
 		.pixel_clock = 27000000,
 		.conf = {
-			0x01, 0x51, 0x22, 0x51, 0x08, 0xfc, 0x88, 0x46,
-			0x72, 0x50, 0x24, 0x0c, 0x24, 0x0f, 0x7c, 0xa5,
-			0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x01, 0x51, 0x2d, 0x75, 0x01, 0x00, 0x88, 0x02,
+			0x72, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac,
+			0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
 			0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
 		},
 	},
@@ -519,9 +521,9 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = {
 		.pixel_clock = 27027000,
 		.conf = {
 			0x01, 0x51, 0x2d, 0x72, 0x64, 0x09, 0x88, 0xc3,
-			0x71, 0x50, 0x24, 0x14, 0x24, 0x0f, 0x7c, 0xa5,
-			0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
-			0x28, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+			0x71, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac,
+			0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
 		},
 	},
 	{
@@ -587,6 +589,15 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = {
 			0x08, 0x10, 0x01, 0x01, 0x48, 0x4a, 0x00, 0x40,
 		},
 	},
+	{
+		.pixel_clock = 297000000,
+		.conf = {
+			0x01, 0x51, 0x3E, 0x05, 0x40, 0xF0, 0x88, 0xC2,
+			0x52, 0x53, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+			0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+		},
+	},
 };
 
 static const char * const hdmi_clk_gates4[] = {
@@ -788,7 +799,8 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
 				sizeof(buf));
 	if (ret > 0) {
 		hdmi_reg_writeb(hdata, HDMI_VSI_CON, HDMI_VSI_CON_EVERY_VSYNC);
-		hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, ret);
+		hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, 3);
+		hdmi_reg_write_buf(hdata, HDMI_VSI_DATA(0), buf + 3, ret - 3);
 	}
 
 	ret = hdmi_audio_infoframe_init(&frm.audio);
@@ -912,7 +924,15 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
 	drm_connector_register(connector);
 	drm_mode_connector_attach_encoder(connector, encoder);
 
-	return 0;
+	if (hdata->bridge) {
+		encoder->bridge = hdata->bridge;
+		hdata->bridge->encoder = encoder;
+		ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
+		if (ret)
+			DRM_ERROR("Failed to attach bridge\n");
+	}
+
+	return ret;
 }
 
 static bool hdmi_mode_fixup(struct drm_encoder *encoder,
@@ -1581,6 +1601,31 @@ static void hdmiphy_clk_enable(struct exynos_drm_clk *clk, bool enable)
 		hdmiphy_disable(hdata);
 }
 
+static int hdmi_bridge_init(struct hdmi_context *hdata)
+{
+	struct device *dev = hdata->dev;
+	struct device_node *ep, *np;
+
+	ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1);
+	if (!ep)
+		return 0;
+
+	np = of_graph_get_remote_port_parent(ep);
+	of_node_put(ep);
+	if (!np) {
+		DRM_ERROR("failed to get remote port parent");
+		return -EINVAL;
+	}
+
+	hdata->bridge = of_drm_find_bridge(np);
+	of_node_put(np);
+
+	if (!hdata->bridge)
+		return -EPROBE_DEFER;
+
+	return 0;
+}
+
 static int hdmi_resources_init(struct hdmi_context *hdata)
 {
 	struct device *dev = hdata->dev;
@@ -1620,17 +1665,18 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
 
 	hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");
 
-	if (PTR_ERR(hdata->reg_hdmi_en) == -ENODEV)
-		return 0;
+	if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) {
+		if (IS_ERR(hdata->reg_hdmi_en))
+			return PTR_ERR(hdata->reg_hdmi_en);
 
-	if (IS_ERR(hdata->reg_hdmi_en))
-		return PTR_ERR(hdata->reg_hdmi_en);
-
-	ret = regulator_enable(hdata->reg_hdmi_en);
-	if (ret)
-		DRM_ERROR("failed to enable hdmi-en regulator\n");
+		ret = regulator_enable(hdata->reg_hdmi_en);
+		if (ret) {
+			DRM_ERROR("failed to enable hdmi-en regulator\n");
+			return ret;
+		}
+	}
 
-	return ret;
+	return hdmi_bridge_init(hdata);
 }
 
 static struct of_device_id hdmi_match_types[] = {
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 48705248f894..04173235f448 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -94,7 +94,7 @@ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
 			"Invalid legacyfb_depth.  Defaulting to 24bpp\n");
 		legacyfb_depth = 24;
 	}
-	fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1, 1);
+	fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1);
 	if (IS_ERR(fsl_dev->fbdev)) {
 		ret = PTR_ERR(fsl_dev->fbdev);
 		fsl_dev->fbdev = NULL;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
index 3194e544ee27..b3d70a63c5a3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
@@ -72,10 +72,8 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
 		return NULL;
 
 	tcon = devm_kzalloc(dev, sizeof(*tcon), GFP_KERNEL);
-	if (!tcon) {
-		ret = -ENOMEM;
+	if (!tcon)
 		goto err_node_put;
-	}
 
 	ret = fsl_tcon_init_regmap(dev, tcon, np);
 	if (ret) {
@@ -89,9 +87,13 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
 		goto err_node_put;
 	}
 
-	of_node_put(np);
-	clk_prepare_enable(tcon->ipg_clk);
+	ret = clk_prepare_enable(tcon->ipg_clk);
+	if (ret) {
+		dev_err(dev, "Couldn't enable the TCON clock\n");
+		goto err_node_put;
+	}
 
+	of_node_put(np);
 	dev_info(dev, "Using TCON in bypass mode\n");
 
 	return tcon;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index fd1488bf5189..da42d2e1d397 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -564,7 +564,7 @@ int psb_fbdev_init(struct drm_device *dev)
 	drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
-				 dev_priv->ops->crtcs, INTELFB_CONN_LIMIT);
+				 INTELFB_CONN_LIMIT);
 	if (ret)
 		goto free;
 
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
index 16fe79053ee1..d7a4d9095b33 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -200,8 +200,7 @@ int hibmc_fbdev_init(struct hibmc_drm_private *priv)
 			      &hibmc_fbdev_helper_funcs);
 
 	/* Now just one crtc and one channel */
-	ret = drm_fb_helper_init(priv->dev,
-				 &hifbdev->helper, 1, 1);
+	ret = drm_fb_helper_init(priv->dev, &hifbdev->helper, 1);
 	if (ret) {
 		DRM_ERROR("failed to initialize fb helper: %d\n", ret);
 		return ret;
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 7df0e8535e41..7ec93aec7e88 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -59,8 +59,7 @@ static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
 		drm_fbdev_cma_hotplug_event(priv->fbdev);
 	} else {
 		priv->fbdev = drm_fbdev_cma_init(dev, 32,
-				dev->mode_config.num_crtc,
-				dev->mode_config.num_connector);
+						 dev->mode_config.num_connector);
 		if (IS_ERR(priv->fbdev))
 			priv->fbdev = NULL;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 239e5144dbda..306a2fb362a2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -69,12 +69,10 @@ insert_mappable_node(struct i915_ggtt *ggtt,
                      struct drm_mm_node *node, u32 size)
 {
 	memset(node, 0, sizeof(*node));
-	return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
-						   size, 0,
-						   I915_COLOR_UNEVICTABLE,
-						   0, ggtt->mappable_end,
-						   DRM_MM_SEARCH_DEFAULT,
-						   DRM_MM_CREATE_DEFAULT);
+	return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+					   size, 0, I915_COLOR_UNEVICTABLE,
+					   0, ggtt->mappable_end,
+					   DRM_MM_INSERT_LOW);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 6d2fcfafd3c5..776c508cb9ae 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -109,6 +109,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	}, **phase;
 	struct i915_vma *vma, *next;
 	struct drm_mm_node *node;
+	enum drm_mm_insert_mode mode;
 	int ret;
 
 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
@@ -127,10 +128,14 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	 * On each list, the oldest objects lie at the HEAD with the freshest
 	 * object on the TAIL.
 	 */
+	mode = DRM_MM_INSERT_BEST;
+	if (flags & PIN_HIGH)
+		mode = DRM_MM_INSERT_HIGH;
+	if (flags & PIN_MAPPABLE)
+		mode = DRM_MM_INSERT_LOW;
 	drm_mm_scan_init_with_range(&scan, &vm->mm,
 				    min_size, alignment, cache_level,
-				    start, end,
-				    flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0);
+				    start, end, mode);
 
 	/* Retire before we search the active list. Although we have
 	 * reasonable accuracy in our retirement lists, we may have
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ae31065ab708..111b7f8c617a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -437,12 +437,11 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 					       PIN_MAPPABLE | PIN_NONBLOCK);
 		if (IS_ERR(vma)) {
 			memset(&cache->node, 0, sizeof(cache->node));
-			ret = drm_mm_insert_node_in_range_generic
+			ret = drm_mm_insert_node_in_range
 				(&ggtt->base.mm, &cache->node,
 				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
 				 0, ggtt->mappable_end,
-				 DRM_MM_SEARCH_DEFAULT,
-				 DRM_MM_CREATE_DEFAULT);
+				 DRM_MM_INSERT_LOW);
 			if (ret) /* no inactive aperture space, use cpu reloc */
 				return NULL;
 		} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 90acddbaaa5e..3c83f2b91af0 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2754,12 +2754,10 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 		return ret;
 
 	/* Reserve a mappable slot for our lockless error capture */
-	ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
-						  &ggtt->error_capture,
-						  PAGE_SIZE, 0,
-						  I915_COLOR_UNEVICTABLE,
-						  0, ggtt->mappable_end,
-						  0, 0);
+	ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+					  0, ggtt->mappable_end,
+					  DRM_MM_INSERT_LOW);
 	if (ret)
 		return ret;
 
@@ -3669,7 +3667,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 			u64 size, u64 alignment, unsigned long color,
 			u64 start, u64 end, unsigned int flags)
 {
-	u32 search_flag, alloc_flag;
+	enum drm_mm_insert_mode mode;
 	u64 offset;
 	int err;
 
@@ -3690,13 +3688,11 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
 		return -ENOSPC;
 
-	if (flags & PIN_HIGH) {
-		search_flag = DRM_MM_SEARCH_BELOW;
-		alloc_flag = DRM_MM_CREATE_TOP;
-	} else {
-		search_flag = DRM_MM_SEARCH_DEFAULT;
-		alloc_flag = DRM_MM_CREATE_DEFAULT;
-	}
+	mode = DRM_MM_INSERT_BEST;
+	if (flags & PIN_HIGH)
+		mode = DRM_MM_INSERT_HIGH;
+	if (flags & PIN_MAPPABLE)
+		mode = DRM_MM_INSERT_LOW;
 
 	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
 	 * so we know that we always have a minimum alignment of 4096.
@@ -3708,10 +3704,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 	if (alignment <= I915_GTT_MIN_ALIGNMENT)
 		alignment = 0;
 
-	err = drm_mm_insert_node_in_range_generic(&vm->mm, node,
-						  size, alignment, color,
-						  start, end,
-						  search_flag, alloc_flag);
+	err = drm_mm_insert_node_in_range(&vm->mm, node,
+					  size, alignment, color,
+					  start, end, mode);
 	if (err != -ENOSPC)
 		return err;
 
@@ -3749,9 +3744,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 	if (err)
 		return err;
 
-	search_flag = DRM_MM_SEARCH_DEFAULT;
-	return drm_mm_insert_node_in_range_generic(&vm->mm, node,
-						   size, alignment, color,
-						   start, end,
-						   search_flag, alloc_flag);
+	return drm_mm_insert_node_in_range(&vm->mm, node,
+					   size, alignment, color,
+					   start, end, DRM_MM_INSERT_EVICT);
 }
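[Note: the PIN_* placement hints now map onto a single drm_mm insert mode instead of a SEARCH/CREATE flag pair. A summary of the conversions in the i915 hunks above, no new behaviour:]

	/*
	 * PIN_HIGH     -> DRM_MM_INSERT_HIGH (was SEARCH_BELOW + CREATE_TOP)
	 * PIN_MAPPABLE -> DRM_MM_INSERT_LOW  (keep aperture objects low)
	 * default      -> DRM_MM_INSERT_BEST (was SEARCH/CREATE_DEFAULT)
	 * retry after eviction -> DRM_MM_INSERT_EVICT
	 */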
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index cba12ee90cbf..82ca8f49fec1 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -55,9 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
 		return -ENODEV;
 
 	mutex_lock(&dev_priv->mm.stolen_lock);
-	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
-					  alignment, start, end,
-					  DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
+					  size, alignment, 0,
+					  start, end, DRM_MM_INSERT_BEST);
 	mutex_unlock(&dev_priv->mm.stolen_lock);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 03b7391fb566..5f8e1d65a4b7 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -713,8 +713,7 @@ int intel_fbdev_init(struct drm_device *dev)
 	if (!intel_fbdev_init_bios(dev, ifbdev))
 		ifbdev->preferred_bpp = 32;
 
-	ret = drm_fb_helper_init(dev, &ifbdev->helper,
-				 INTEL_INFO(dev_priv)->num_pipes, 4);
+	ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
 	if (ret) {
 		kfree(ifbdev);
 		return ret;
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 33404295b447..5ae48836652e 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -389,8 +389,7 @@ static int imx_drm_bind(struct device *dev)
 		dev_warn(dev, "Invalid legacyfb_depth.  Defaulting to 16bpp\n");
 		legacyfb_depth = 16;
 	}
-	imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
-				drm->mode_config.num_crtc, MAX_CRTC);
+	imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, MAX_CRTC);
 	if (IS_ERR(imxdrm->fbhelper)) {
 		ret = PTR_ERR(imxdrm->fbhelper);
 		imxdrm->fbhelper = NULL;
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index 2591978b8aad..92cf84530f49 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,4 +1,4 @@
-meson-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
-meson-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
+meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
+meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
 
-obj-$(CONFIG_DRM_MESON) += meson.o
+obj-$(CONFIG_DRM_MESON) += meson-drm.o
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index ff1f6019b97b..6f2fd82ed483 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -279,7 +279,6 @@ static int meson_drv_probe(struct platform_device *pdev)
 	drm->mode_config.funcs = &meson_mode_config_funcs;
 
 	priv->fbdev = drm_fbdev_cma_init(drm, 32,
-					 drm->mode_config.num_crtc,
 					 drm->mode_config.num_connector);
 	if (IS_ERR(priv->fbdev)) {
 		ret = PTR_ERR(priv->fbdev);
@@ -329,8 +328,7 @@ static struct platform_driver meson_drm_platform_driver = {
 	.probe      = meson_drv_probe,
 	.remove     = meson_drv_remove,
 	.driver     = {
-		.owner  = THIS_MODULE,
-		.name   = DRIVER_NAME,
+		.name	= "meson-drm",
 		.of_match_table = dt_match,
 	},
 };
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 1a665e1671b8..a449bb91213a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -286,7 +286,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)
 	drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
-				 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
+				 MGAG200FB_CONN_LIMIT);
 	if (ret)
 		goto err_fb_helper;
 
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 7f78da695dff..5b8e23d051f2 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -72,3 +72,10 @@ config DRM_MSM_DSI_28NM_8960_PHY
 	help
 	  Choose this option if the 28nm DSI PHY 8960 variant is used on the
 	  platform.
+
+config DRM_MSM_DSI_14NM_PHY
+	bool "Enable DSI 14nm PHY driver in MSM DRM (used by MSM8996/APQ8096)"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if DSI PHY on 8996 is used on the platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 028c24df2291..39055362da95 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -76,11 +76,13 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
 msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
 msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
 msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
+msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
 
 ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
 msm-y += dsi/pll/dsi_pll.o
 msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
 msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
+msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
 endif
 
 obj-$(CONFIG_DRM_MSM)	+= msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index b8647198c11c..4414cf73735d 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -12,6 +12,7 @@
  */
 
 #include "msm_gem.h"
+#include "msm_mmu.h"
 #include "a5xx_gpu.h"
 
 extern bool hang_debug;
@@ -327,7 +328,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	/* Enable RBBM error reporting bits */
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
 
-	if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+	if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
 		/*
 		 * Mask out the activity signals from RB1-3 to avoid false
 		 * positives
@@ -381,7 +382,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 
 	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
 
-	if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+	if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
 		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
 
 	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
@@ -573,6 +574,19 @@ static bool a5xx_idle(struct msm_gpu *gpu)
 	return true;
 }
 
+static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
+{
+	struct msm_gpu *gpu = arg;
+	pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
+			iova, flags,
+			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
+			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
+			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
+			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));
+
+	return -EFAULT;
+}
+
 static void a5xx_cp_err_irq(struct msm_gpu *gpu)
 {
 	u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
@@ -884,5 +898,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
 		return ERR_PTR(ret);
 	}
 
+	if (gpu->aspace)
+		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+
 	return gpu;
 }
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 893eb2b2531b..ece39b16a864 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -75,12 +75,14 @@ static const struct adreno_info gpulist[] = {
 		.gmem  = (SZ_1M + SZ_512K),
 		.init  = a4xx_gpu_init,
 	}, {
-		.rev = ADRENO_REV(5, 3, 0, ANY_ID),
+		.rev = ADRENO_REV(5, 3, 0, 2),
 		.revn = 530,
 		.name = "A530",
 		.pm4fw = "a530_pm4.fw",
 		.pfpfw = "a530_pfp.fw",
 		.gmem = SZ_1M,
+		.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
+			ADRENO_QUIRK_FAULT_DETECT_MASK,
 		.init = a5xx_gpu_init,
 		.gpmufw = "a530v3_gpmu.fw2",
 	},
@@ -181,22 +183,51 @@ static void set_gpu_pdev(struct drm_device *dev,
 	priv->gpu_pdev = pdev;
 }
 
-static const struct {
-	const char *str;
-	uint32_t flag;
-} quirks[] = {
-	{ "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
-	{ "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
-};
+static int find_chipid(struct device *dev, u32 *chipid)
+{
+	struct device_node *node = dev->of_node;
+	const char *compat;
+	int ret;
+
+	/* first search the compat strings for qcom,adreno-XYZ.W: */
+	ret = of_property_read_string_index(node, "compatible", 0, &compat);
+	if (ret == 0) {
+		unsigned rev, patch;
+
+		if (sscanf(compat, "qcom,adreno-%u.%u", &rev, &patch) == 2) {
+			*chipid = 0;
+			*chipid |= (rev / 100) << 24;  /* core */
+			rev %= 100;
+			*chipid |= (rev / 10) << 16;   /* major */
+			rev %= 10;
+			*chipid |= rev << 8;           /* minor */
+			*chipid |= patch;
+
+			return 0;
+		}
+	}
+
+	/* and if that fails, fall back to legacy "qcom,chipid" property: */
+	ret = of_property_read_u32(node, "qcom,chipid", chipid);
+	if (ret)
+		return ret;
+
+	dev_warn(dev, "Using legacy qcom,chipid binding!\n");
+	dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
+			(*chipid >> 24) & 0xff, (*chipid >> 16) & 0xff,
+			(*chipid >> 8) & 0xff, *chipid & 0xff);
+
+	return 0;
+}
 
 static int adreno_bind(struct device *dev, struct device *master, void *data)
 {
 	static struct adreno_platform_config config = {};
 	struct device_node *child, *node = dev->of_node;
 	u32 val;
-	int ret, i;
+	int ret;
 
-	ret = of_property_read_u32(node, "qcom,chipid", &val);
+	ret = find_chipid(dev, &val);
 	if (ret) {
 		dev_err(dev, "could not find chipid: %d\n", ret);
 		return ret;
@@ -224,14 +255,12 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
 	}
 
 	if (!config.fast_rate) {
-		dev_err(dev, "could not find clk rates\n");
-		return -ENXIO;
+		dev_warn(dev, "could not find clk rates\n");
+		/* This is a safe low speed for all devices: */
+		config.fast_rate = 200000000;
+		config.slow_rate = 27000000;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(quirks); i++)
-		if (of_property_read_bool(node, quirks[i].str))
-			config.quirks |= quirks[i].flag;
-
 	dev->platform_data = &config;
 	set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
 	return 0;
@@ -260,6 +289,7 @@ static int adreno_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,adreno" },
 	{ .compatible = "qcom,adreno-3xx" },
 	/* for backwards compat w/ downstream kgsl DT files: */
 	{ .compatible = "qcom,kgsl-3d0" },
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 686a580c711a..c9bd1e6225f4 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -352,7 +352,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	adreno_gpu->gmem = adreno_gpu->info->gmem;
 	adreno_gpu->revn = adreno_gpu->info->revn;
 	adreno_gpu->rev = config->rev;
-	adreno_gpu->quirks = config->quirks;
 
 	gpu->fast_rate = config->fast_rate;
 	gpu->slow_rate = config->slow_rate;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index e8d55b0306ed..42e444a67630 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -75,6 +75,7 @@ struct adreno_info {
 	const char *pm4fw, *pfpfw;
 	const char *gpmufw;
 	uint32_t gmem;
+	enum adreno_quirks quirks;
 	struct msm_gpu *(*init)(struct drm_device *dev);
 };
 
@@ -116,8 +117,6 @@ struct adreno_gpu {
 	 * code (a3xx_gpu.c) and stored in this common location.
 	 */
 	const unsigned int *reg_offsets;
-
-	uint32_t quirks;
 };
 #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
 
@@ -128,7 +127,6 @@ struct adreno_platform_config {
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	struct msm_bus_scale_pdata *bus_scale_table;
 #endif
-	uint32_t quirks;
 };
 
 #define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index ec572f8389ed..311c1c1e7d6c 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -18,9 +18,7 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
 	if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
 		return NULL;
 
-	return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ?
-		msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
-		msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
+	return msm_dsi->encoder;
 }
 
 static int dsi_get_phy(struct msm_dsi *msm_dsi)
@@ -187,14 +185,13 @@ void __exit msm_dsi_unregister(void)
 }
 
 int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
-		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+			 struct drm_encoder *encoder)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_bridge *ext_bridge;
-	int ret, i;
+	int ret;
 
-	if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
-		!encoders[MSM_DSI_CMD_ENCODER_ID]))
+	if (WARN_ON(!encoder))
 		return -EINVAL;
 
 	msm_dsi->dev = dev;
@@ -205,6 +202,8 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 		goto fail;
 	}
 
+	msm_dsi->encoder = encoder;
+
 	msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
 	if (IS_ERR(msm_dsi->bridge)) {
 		ret = PTR_ERR(msm_dsi->bridge);
@@ -213,11 +212,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 		goto fail;
 	}
 
-	for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
-		encoders[i]->bridge = msm_dsi->bridge;
-		msm_dsi->encoders[i] = encoders[i];
-	}
-
 	/*
 	 * check if the dsi encoder output is connected to a panel or an
 	 * external bridge. We create a connector only if we're connected to a
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 03f115f532c2..32369975d155 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -27,14 +27,24 @@
 #define DSI_1	1
 #define DSI_MAX	2
 
+struct msm_dsi_phy_shared_timings;
+struct msm_dsi_phy_clk_request;
+
 enum msm_dsi_phy_type {
 	MSM_DSI_PHY_28NM_HPM,
 	MSM_DSI_PHY_28NM_LP,
 	MSM_DSI_PHY_20NM,
 	MSM_DSI_PHY_28NM_8960,
+	MSM_DSI_PHY_14NM,
 	MSM_DSI_PHY_MAX
 };
 
+enum msm_dsi_phy_usecase {
+	MSM_DSI_PHY_STANDALONE,
+	MSM_DSI_PHY_MASTER,
+	MSM_DSI_PHY_SLAVE,
+};
+
 #define DSI_DEV_REGULATOR_MAX	8
 #define DSI_BUS_CLK_MAX		4
 
@@ -73,8 +83,8 @@ struct msm_dsi {
 	struct device *phy_dev;
 	bool phy_enabled;
 
-	/* the encoders we are hooked to (outside of dsi block) */
-	struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM];
+	/* the encoder we are hooked to (outside of dsi block) */
+	struct drm_encoder *encoder;
 
 	int id;
 };
@@ -84,12 +94,9 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
 void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
 struct drm_connector *msm_dsi_manager_connector_init(u8 id);
 struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
-int msm_dsi_manager_phy_enable(int id,
-		const unsigned long bit_rate, const unsigned long esc_rate,
-		u32 *clk_pre, u32 *clk_post);
-void msm_dsi_manager_phy_disable(int id);
 int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
 bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
+void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags);
 int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
 void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
 
@@ -111,6 +118,8 @@ int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
 	struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
 void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
 int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
+int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
+			    enum msm_dsi_phy_usecase uc);
 #else
 static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
 			 enum msm_dsi_phy_type type, int id) {
@@ -131,6 +140,11 @@ static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
 {
 	return 0;
 }
+static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
+					  enum msm_dsi_phy_usecase uc)
+{
+	return -ENODEV;
+}
 #endif
 
 /* dsi host */
@@ -146,7 +160,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
 					u32 dma_base, u32 len);
 int msm_dsi_host_enable(struct mipi_dsi_host *host);
 int msm_dsi_host_disable(struct mipi_dsi_host *host);
-int msm_dsi_host_power_on(struct mipi_dsi_host *host);
+int msm_dsi_host_power_on(struct mipi_dsi_host *host,
+			struct msm_dsi_phy_shared_timings *phy_shared_timings);
 int msm_dsi_host_power_off(struct mipi_dsi_host *host);
 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 					struct drm_display_mode *mode);
@@ -157,6 +172,9 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
 void msm_dsi_host_unregister(struct mipi_dsi_host *host);
 int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
 			struct msm_dsi_pll *src_pll);
+void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
+void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
+	struct msm_dsi_phy_clk_request *clk_req);
 void msm_dsi_host_destroy(struct mipi_dsi_host *host);
 int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
 					struct drm_device *dev);
@@ -164,14 +182,27 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);
 
 /* dsi phy */
 struct msm_dsi_phy;
+struct msm_dsi_phy_shared_timings {
+	u32 clk_post;
+	u32 clk_pre;
+	bool clk_pre_inc_by_2;
+};
+
+struct msm_dsi_phy_clk_request {
+	unsigned long bitclk_rate;
+	unsigned long escclk_rate;
+};
+
 void msm_dsi_phy_driver_register(void);
 void msm_dsi_phy_driver_unregister(void);
 int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
-	const unsigned long bit_rate, const unsigned long esc_rate);
+			struct msm_dsi_phy_clk_request *clk_req);
 void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
-void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
-					u32 *clk_pre, u32 *clk_post);
+void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
+			struct msm_dsi_phy_shared_timings *shared_timing);
 struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
+void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
+			     enum msm_dsi_phy_usecase uc);
 
 #endif /* __DSI_CONNECTOR_H__ */
 
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 39dff7d5e89b..b3d70ea42891 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36965 bytes, from 2016-11-26 23:01:08)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  27887 bytes, from 2015-10-22 16:34:52)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
-
-Copyright (C) 2013-2015 by the following authors:
+- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml    (  33004 bytes, from 2017-01-11 05:19:19)
+- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-05-09 06:32:54)
+
+Copyright (C) 2013-2017 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -1304,5 +1295,257 @@ static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
 
 #define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG			0x00000018
 
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID0			0x00000000
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID1			0x00000004
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID2			0x00000008
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID3			0x0000000c
+
+#define REG_DSI_14nm_PHY_CMN_CLK_CFG0				0x00000010
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK		0x000000f0
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT		4
+static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK;
+}
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK		0x000000f0
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT		4
+static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK;
+}
+
+#define REG_DSI_14nm_PHY_CMN_CLK_CFG1				0x00000014
+#define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL			0x00000001
+
+#define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL			0x00000018
+#define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL		0x00000004
+
+#define REG_DSI_14nm_PHY_CMN_CTRL_0				0x0000001c
+
+#define REG_DSI_14nm_PHY_CMN_CTRL_1				0x00000020
+
+#define REG_DSI_14nm_PHY_CMN_HW_TRIGGER				0x00000024
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG0				0x00000028
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG1				0x0000002c
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG2				0x00000030
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG0				0x00000034
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG1				0x00000038
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG2				0x0000003c
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG3				0x00000040
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG4				0x00000044
+
+#define REG_DSI_14nm_PHY_CMN_PLL_CNTRL				0x00000048
+#define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START			0x00000001
+
+#define REG_DSI_14nm_PHY_CMN_LDO_CNTRL				0x0000004c
+#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK		0x0000003f
+#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK			0x000000c0
+#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT			6
+static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN			0x00000001
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK		0x00000007
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK		0x00000070
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT		4
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK		0x00000007
+#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; }
+
+#define REG_DSI_14nm_PHY_PLL_IE_TRIM				0x00000000
+
+#define REG_DSI_14nm_PHY_PLL_IP_TRIM				0x00000004
+
+#define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM				0x00000010
+
+#define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN			0x0000001c
+
+#define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET			0x00000028
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL			0x0000002c
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2			0x00000030
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3			0x00000034
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4			0x00000038
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5			0x0000003c
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1			0x00000040
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2			0x00000044
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1			0x00000048
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2			0x0000004c
+
+#define REG_DSI_14nm_PHY_PLL_VREF_CFG1				0x0000005c
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_CODE				0x00000058
+
+#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1			0x0000006c
+
+#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2			0x00000070
+
+#define REG_DSI_14nm_PHY_PLL_VCO_COUNT1				0x00000074
+
+#define REG_DSI_14nm_PHY_PLL_VCO_COUNT2				0x00000078
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1			0x0000007c
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2			0x00000080
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3			0x00000084
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN			0x00000088
+
+#define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE			0x0000008c
+
+#define REG_DSI_14nm_PHY_PLL_DEC_START				0x00000090
+
+#define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER			0x00000094
+
+#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1			0x00000098
+
+#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2			0x0000009c
+
+#define REG_DSI_14nm_PHY_PLL_SSC_PER1				0x000000a0
+
+#define REG_DSI_14nm_PHY_PLL_SSC_PER2				0x000000a4
+
+#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1			0x000000a8
+
+#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2			0x000000ac
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1			0x000000b4
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2			0x000000b8
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3			0x000000bc
+
+#define REG_DSI_14nm_PHY_PLL_TXCLK_EN				0x000000c0
+
+#define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL				0x000000c4
+
+#define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS		0x000000cc
+
+#define REG_DSI_14nm_PHY_PLL_PLL_MISC1				0x000000e8
+
+#define REG_DSI_14nm_PHY_PLL_CP_SET_CUR				0x000000f0
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET			0x000000f4
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET			0x000000f8
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET			0x000000fc
+
+#define REG_DSI_14nm_PHY_PLL_PLL_LPF1				0x00000100
+
+#define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV			0x00000104
+
+#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP			0x00000108
+
 
 #endif /* DSI_XML */
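The generated helpers pair each field's mask and shift so callers can compose
register values without open-coding the bit math. A usage sketch, mirroring
how the 14nm PHY driver later in this diff programs the LDO control register:

    u32 data = 0x1c | DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32);
    dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);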
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 63436d8ee470..a5d75c9b3a73 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -94,6 +94,30 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
 	.num_dsi = 2,
 };
 
+/*
+ * TODO: core_mmss_clk fails to enable for some reason, but things work fine
+ * without it too. Figure out why it doesn't enable and uncomment it below.
+ */
+static const char * const dsi_8996_bus_clk_names[] = {
+	"mdp_core_clk", "iface_clk", "bus_clk", /* "core_mmss_clk", */
+};
+
+static const struct msm_dsi_config msm8996_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 2,
+		.regs = {
+			{"vdda", 18160, 1 },	/* 1.25 V */
+			{"vcca", 17000, 32 },	/* 0.925 V */
+			{"vddio", 100000, 100 },/* 1.8 V */
+		},
+	},
+	.bus_clk_names = dsi_8996_bus_clk_names,
+	.num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names),
+	.io_start = { 0x994000, 0x996000 },
+	.num_dsi = 2,
+};
+
 static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
 	{MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
 	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
@@ -106,6 +130,7 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
 						&msm8974_apq8084_dsi_cfg},
 	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
 	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg},
 };
 
 const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
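With the new table entry, a controller that reports DSI 6G v1.4.1 resolves to
the 8996 config. A lookup sketch, assuming msm_dsi_cfg_get() returns NULL for
unknown revisions:

    const struct msm_dsi_cfg_handler *cfg_hnd =
            msm_dsi_cfg_get(MSM_DSI_VER_MAJOR_6G,
                            MSM_DSI_6G_VER_MINOR_V1_4_1);
    if (!cfg_hnd)
            return -EINVAL;   /* unsupported controller revision */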
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index eeacc3232494..00a5da2663c6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -24,6 +24,7 @@
 #define MSM_DSI_6G_VER_MINOR_V1_2	0x10020000
 #define MSM_DSI_6G_VER_MINOR_V1_3	0x10030000
 #define MSM_DSI_6G_VER_MINOR_V1_3_1	0x10030001
+#define MSM_DSI_6G_VER_MINOR_V1_4_1	0x10040001
 
 #define MSM_DSI_V2_VER_MINOR_8064	0x0
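The 6G minor-version constants pack major/minor/patch into one word, and the
new value follows the existing pattern. A hypothetical packing helper, shown
only to make the encoding explicit (not part of the driver):

    /* Hypothetical helper, consistent with the constants above. */
    #define DSI_6G_VER(maj, min, patch) \
            (((maj) << 28) | ((min) << 16) | (patch))

    /* DSI_6G_VER(1, 4, 1) == 0x10040001 == MSM_DSI_6G_VER_MINOR_V1_4_1 */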
 
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 3819fdefcae2..1fc07ce24686 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -691,17 +691,6 @@ static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
 	return 0;
 }
 
-static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
-{
-	DBG("");
-	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
-	/* Make sure fully reset */
-	wmb();
-	udelay(1000);
-	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
-	udelay(100);
-}
-
 static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
 {
 	u32 intr;
@@ -756,7 +745,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
 }
 
 static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
-				u32 clk_pre, u32 clk_post)
+			struct msm_dsi_phy_shared_timings *phy_shared_timings)
 {
 	u32 flags = msm_host->mode_flags;
 	enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
@@ -819,10 +808,16 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
 		data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
 	dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
 
-	data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
-		DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
+	data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) |
+		DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre);
 	dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
 
+	if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+	    (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) &&
+	    phy_shared_timings->clk_pre_inc_by_2)
+		dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
+			  DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);
+
 	data = 0;
 	if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
 		data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
@@ -1482,6 +1477,8 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
 	msm_host->format = dsi->format;
 	msm_host->mode_flags = dsi->mode_flags;
 
+	msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags);
+
 	/* Some gpios defined in panel DT need to be controlled by host */
 	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
 	if (ret)
@@ -1557,8 +1554,9 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
 
 	prop = of_find_property(ep, "data-lanes", &len);
 	if (!prop) {
-		dev_dbg(dev, "failed to find data lane mapping\n");
-		return -EINVAL;
+		dev_dbg(dev,
+			"failed to find data lane mapping, using default\n");
+		return 0;
 	}
 
 	num_lanes = len / sizeof(u32);
@@ -1615,7 +1613,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
 	struct device *dev = &msm_host->pdev->dev;
 	struct device_node *np = dev->of_node;
 	struct device_node *endpoint, *device_node;
-	int ret;
+	int ret = 0;
 
 	/*
 	 * Get the endpoint of the output port of the DSI host. In our case,
@@ -1639,8 +1637,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
 	/* Get panel node from the output port's endpoint data */
 	device_node = of_graph_get_remote_port_parent(endpoint);
 	if (!device_node) {
-		dev_err(dev, "%s: no valid device\n", __func__);
-		ret = -ENODEV;
+		dev_dbg(dev, "%s: no valid device\n", __func__);
 		goto err;
 	}
 
@@ -2118,6 +2115,28 @@ exit:
 	return ret;
 }
 
+void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	DBG("");
+	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
+	/* Make sure the PHY is fully reset */
+	wmb();
+	udelay(1000);
+	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
+	udelay(100);
+}
+
+void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
+	struct msm_dsi_phy_clk_request *clk_req)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
+	clk_req->escclk_rate = msm_host->esc_clk_rate;
+}
+
 int msm_dsi_host_enable(struct mipi_dsi_host *host)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
@@ -2165,10 +2184,10 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
 			SFPB_GPREG_MASTER_PORT_EN(en));
 }
 
-int msm_dsi_host_power_on(struct mipi_dsi_host *host)
+int msm_dsi_host_power_on(struct mipi_dsi_host *host,
+			struct msm_dsi_phy_shared_timings *phy_shared_timings)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-	u32 clk_pre = 0, clk_post = 0;
 	int ret = 0;
 
 	mutex_lock(&msm_host->dev_mutex);
@@ -2179,12 +2198,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
 
 	msm_dsi_sfpb_config(msm_host, true);
 
-	ret = dsi_calc_clk_rate(msm_host);
-	if (ret) {
-		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
-		goto unlock_ret;
-	}
-
 	ret = dsi_host_regulator_enable(msm_host);
 	if (ret) {
 		pr_err("%s:Failed to enable vregs.ret=%d\n",
@@ -2192,23 +2205,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
 		goto unlock_ret;
 	}
 
-	ret = dsi_bus_clk_enable(msm_host);
-	if (ret) {
-		pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
-		goto fail_disable_reg;
-	}
-
-	dsi_phy_sw_reset(msm_host);
-	ret = msm_dsi_manager_phy_enable(msm_host->id,
-					msm_host->byte_clk_rate * 8,
-					msm_host->esc_clk_rate,
-					&clk_pre, &clk_post);
-	dsi_bus_clk_disable(msm_host);
-	if (ret) {
-		pr_err("%s: failed to enable phy, %d\n", __func__, ret);
-		goto fail_disable_reg;
-	}
-
 	ret = dsi_clk_ctrl(msm_host, 1);
 	if (ret) {
 		pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
@@ -2224,7 +2220,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
 
 	dsi_timing_setup(msm_host);
 	dsi_sw_reset(msm_host);
-	dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
+	dsi_ctrl_config(msm_host, true, phy_shared_timings);
 
 	if (msm_host->disp_en_gpio)
 		gpiod_set_value(msm_host->disp_en_gpio, 1);
@@ -2253,15 +2249,13 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
 		goto unlock_ret;
 	}
 
-	dsi_ctrl_config(msm_host, false, 0, 0);
+	dsi_ctrl_config(msm_host, false, NULL);
 
 	if (msm_host->disp_en_gpio)
 		gpiod_set_value(msm_host->disp_en_gpio, 0);
 
 	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
 
-	msm_dsi_manager_phy_disable(msm_host->id);
-
 	dsi_clk_ctrl(msm_host, 0);
 
 	dsi_host_regulator_disable(msm_host);
@@ -2281,6 +2275,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 					struct drm_display_mode *mode)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	int ret;
 
 	if (msm_host->mode) {
 		drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,6 +2288,12 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 		return -ENOMEM;
 	}
 
+	ret = dsi_calc_clk_rate(msm_host);
+	if (ret) {
+		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+		return ret;
+	}
+
 	return 0;
 }
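Taken together, the host changes invert the old bring-up order: clock rates
are computed once at mode-set time, and the PHY is enabled by the DSI manager
before the host powers on, with the resulting timings passed back in. A
condensed sketch of the sequence (the manager implementation follows below):

    msm_dsi_host_get_phy_clk_req(host, &clk_req);         /* bitclk/escclk rates */
    ret = msm_dsi_phy_enable(phy, src_pll_id, &clk_req);  /* PHY first */
    msm_dsi_phy_get_shared_timings(phy, &timings);        /* clk_pre/clk_post */
    ret = msm_dsi_host_power_on(host, &timings);          /* then the host */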
 
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 2bd8dad76105..921270ea6059 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -72,11 +72,12 @@ static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id)
 	return 0;
 }
 
-static int dsi_mgr_host_register(int id)
+static int dsi_mgr_setup_components(int id)
 {
 	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
 	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
 	struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+	struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
 	struct msm_dsi_pll *src_pll;
 	int ret;
 
@@ -85,15 +86,16 @@ static int dsi_mgr_host_register(int id)
 		if (ret)
 			return ret;
 
+		msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
 		src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
 		ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
 	} else if (!other_dsi) {
 		ret = 0;
 	} else {
-		struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ?
-					msm_dsi : other_dsi;
-		struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ?
-					other_dsi : msm_dsi;
+		struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
+							msm_dsi : other_dsi;
+		struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
+							other_dsi : msm_dsi;
 		/* Register slave host first, so that slave DSI device
 		 * has a chance to probe, and do not block the master
 		 * DSI device's probe.
@@ -101,14 +103,18 @@ static int dsi_mgr_host_register(int id)
 		 * because only master DSI device adds the panel to global
 		 * panel list. The panel's device is the master DSI device.
 		 */
-		ret = msm_dsi_host_register(sdsi->host, false);
+		ret = msm_dsi_host_register(slave_link_dsi->host, false);
 		if (ret)
 			return ret;
-		ret = msm_dsi_host_register(mdsi->host, true);
+		ret = msm_dsi_host_register(master_link_dsi->host, true);
 		if (ret)
 			return ret;
 
 		/* PLL0 is to drive both 2 DSI link clocks in Dual DSI mode. */
+		msm_dsi_phy_set_usecase(clk_master_dsi->phy,
+					MSM_DSI_PHY_MASTER);
+		msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
+					MSM_DSI_PHY_SLAVE);
 		src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy);
 		ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
 		if (ret)
@@ -119,6 +125,84 @@ static int dsi_mgr_host_register(int id)
 	return ret;
 }
 
+static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
+		      struct msm_dsi_phy_shared_timings *shared_timings)
+{
+	struct msm_dsi_phy_clk_request clk_req;
+	int ret;
+
+	msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req);
+
+	ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req);
+	msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
+
+	return ret;
+}
+
+static int
+dsi_mgr_phy_enable(int id,
+		   struct msm_dsi_phy_shared_timings shared_timings[DSI_MAX])
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+	struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+	int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
+	int ret;
+
+	/* In case of dual DSI, some registers in PHY1 have been programmed
+	 * during PLL0 clock's set_rate. The PHY1 reset called by host1 here
+	 * will silently reset those PHY1 registers. Therefore we need to reset
+	 * and enable both PHYs before any PLL clock operation.
+	 */
+	if (IS_DUAL_DSI() && mdsi && sdsi) {
+		if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
+			msm_dsi_host_reset_phy(mdsi->host);
+			msm_dsi_host_reset_phy(sdsi->host);
+
+			ret = enable_phy(mdsi, src_pll_id,
+					 &shared_timings[DSI_CLOCK_MASTER]);
+			if (ret)
+				return ret;
+			ret = enable_phy(sdsi, src_pll_id,
+					 &shared_timings[DSI_CLOCK_SLAVE]);
+			if (ret) {
+				msm_dsi_phy_disable(mdsi->phy);
+				return ret;
+			}
+		}
+	} else {
+		msm_dsi_host_reset_phy(msm_dsi->host);
+		ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
+		if (ret)
+			return ret;
+	}
+
+	msm_dsi->phy_enabled = true;
+
+	return 0;
+}
+
+static void dsi_mgr_phy_disable(int id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+	struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+
+	/* Disable the DSI PHY. In a dual-DSI configuration, the PHY should
+	 * be disabled for the first controller only when the second
+	 * controller is disabled.
+	 */
+	msm_dsi->phy_enabled = false;
+	if (IS_DUAL_DSI() && mdsi && sdsi) {
+		if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
+			msm_dsi_phy_disable(sdsi->phy);
+			msm_dsi_phy_disable(mdsi->phy);
+		}
+	} else {
+		msm_dsi_phy_disable(msm_dsi->phy);
+	}
+}
+
 struct dsi_connector {
 	struct drm_connector base;
 	int id;
@@ -168,6 +252,16 @@ static enum drm_connector_status dsi_mgr_connector_detect(
 			msm_dsi->panel = msm_dsi_host_get_panel(
 					other_dsi->host, NULL);
 
+
+		if (msm_dsi->panel && kms->funcs->set_encoder_mode) {
+			bool cmd_mode = !(msm_dsi->device_flags &
+					  MIPI_DSI_MODE_VIDEO);
+			struct drm_encoder *encoder =
+					msm_dsi_get_encoder(msm_dsi);
+
+			kms->funcs->set_encoder_mode(kms, encoder, cmd_mode);
+		}
+
 		if (msm_dsi->panel && IS_DUAL_DSI())
 			drm_object_attach_property(&connector->base,
 				connector->dev->mode_config.tile_property, 0);
@@ -344,22 +438,31 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
 	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
 	struct mipi_dsi_host *host = msm_dsi->host;
 	struct drm_panel *panel = msm_dsi->panel;
+	struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX];
 	bool is_dual_dsi = IS_DUAL_DSI();
 	int ret;
 
 	DBG("id=%d", id);
-	if (!msm_dsi_device_connected(msm_dsi) ||
-			(is_dual_dsi && (DSI_1 == id)))
+	if (!msm_dsi_device_connected(msm_dsi))
 		return;
 
-	ret = msm_dsi_host_power_on(host);
+	ret = dsi_mgr_phy_enable(id, phy_shared_timings);
+	if (ret)
+		goto phy_en_fail;
+
+	/* Do nothing with the host if it is DSI 1 in case of dual DSI */
+	if (is_dual_dsi && (DSI_1 == id))
+		return;
+
+	ret = msm_dsi_host_power_on(host, &phy_shared_timings[id]);
 	if (ret) {
 		pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
 		goto host_on_fail;
 	}
 
 	if (is_dual_dsi && msm_dsi1) {
-		ret = msm_dsi_host_power_on(msm_dsi1->host);
+		ret = msm_dsi_host_power_on(msm_dsi1->host,
+					    &phy_shared_timings[DSI_1]);
 		if (ret) {
 			pr_err("%s: power on host1 failed, %d\n",
 							__func__, ret);
@@ -418,6 +521,8 @@ panel_prep_fail:
 host1_on_fail:
 	msm_dsi_host_power_off(host);
 host_on_fail:
+	dsi_mgr_phy_disable(id);
+phy_en_fail:
 	return;
 }
 
@@ -443,10 +548,17 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
 
 	DBG("id=%d", id);
 
-	if (!msm_dsi_device_connected(msm_dsi) ||
-			(is_dual_dsi && (DSI_1 == id)))
+	if (!msm_dsi_device_connected(msm_dsi))
 		return;
 
+	/*
+	 * Do nothing with the host if it is DSI 1 in case of dual DSI.
+	 * It is safe to call dsi_mgr_phy_disable() here because a single PHY
+	 * won't be disabled until both PHYs request disable.
+	 */
+	if (is_dual_dsi && (DSI_1 == id))
+		goto disable_phy;
+
 	if (panel) {
 		ret = drm_panel_disable(panel);
 		if (ret)
@@ -481,6 +593,9 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
 			pr_err("%s: host1 power off failed, %d\n",
 								__func__, ret);
 	}
+
+disable_phy:
+	dsi_mgr_phy_disable(id);
 }
 
 static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
@@ -540,7 +655,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
 	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
 	struct drm_connector *connector = NULL;
 	struct dsi_connector *dsi_connector;
-	int ret, i;
+	int ret;
 
 	dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL);
 	if (!dsi_connector)
@@ -566,9 +681,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;
 
-	for (i = 0; i < MSM_DSI_ENCODER_NUM; i++)
-		drm_mode_connector_attach_encoder(connector,
-						msm_dsi->encoders[i]);
+	drm_mode_connector_attach_encoder(connector, msm_dsi->encoder);
 
 	return connector;
 }
@@ -591,13 +704,7 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
 
 	dsi_bridge->id = id;
 
-	/*
-	 * HACK: we may not know the external DSI bridge device's mode
-	 * flags here. We'll get to know them only when the device
-	 * attaches to the dsi host. For now, assume the bridge supports
-	 * DSI video mode
-	 */
-	encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID];
+	encoder = msm_dsi->encoder;
 
 	bridge = &dsi_bridge->base;
 	bridge->funcs = &dsi_mgr_bridge_funcs;
@@ -628,13 +735,7 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
 	ext_bridge = msm_dsi->external_bridge =
 			msm_dsi_host_get_bridge(msm_dsi->host);
 
-	/*
-	 * HACK: we may not know the external DSI bridge device's mode
-	 * flags here. We'll get to know them only when the device
-	 * attaches to the dsi host. For now, assume the bridge supports
-	 * DSI video mode
-	 */
-	encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID];
+	encoder = msm_dsi->encoder;
 
 	/* link the internal dsi bridge to the external bridge */
 	drm_bridge_attach(encoder, ext_bridge, int_bridge);
@@ -662,68 +763,6 @@ void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
 {
 }
 
-int msm_dsi_manager_phy_enable(int id,
-		const unsigned long bit_rate, const unsigned long esc_rate,
-		u32 *clk_pre, u32 *clk_post)
-{
-	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
-	struct msm_dsi_phy *phy = msm_dsi->phy;
-	int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
-	struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
-	int ret;
-
-	ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate);
-	if (ret)
-		return ret;
-
-	/*
-	 * Reset DSI PHY silently changes its PLL registers to reset status,
-	 * which will confuse clock driver and result in wrong output rate of
-	 * link clocks. Restore PLL status if its PLL is being used as clock
-	 * source.
-	 */
-	if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) {
-		ret = msm_dsi_pll_restore_state(pll);
-		if (ret) {
-			pr_err("%s: failed to restore pll state\n", __func__);
-			msm_dsi_phy_disable(phy);
-			return ret;
-		}
-	}
-
-	msm_dsi->phy_enabled = true;
-	msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
-
-	return 0;
-}
-
-void msm_dsi_manager_phy_disable(int id)
-{
-	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
-	struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
-	struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
-	struct msm_dsi_phy *phy = msm_dsi->phy;
-	struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
-
-	/* Save PLL status if it is a clock source */
-	if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER))
-		msm_dsi_pll_save_state(pll);
-
-	/* disable DSI phy
-	 * In dual-dsi configuration, the phy should be disabled for the
-	 * first controller only when the second controller is disabled.
-	 */
-	msm_dsi->phy_enabled = false;
-	if (IS_DUAL_DSI() && mdsi && sdsi) {
-		if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
-			msm_dsi_phy_disable(sdsi->phy);
-			msm_dsi_phy_disable(mdsi->phy);
-		}
-	} else {
-		msm_dsi_phy_disable(phy);
-	}
-}
-
 int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
 {
 	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
@@ -787,6 +826,33 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len)
 	return true;
 }
 
+void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_device *dev = msm_dsi->dev;
+	struct msm_drm_private *priv;
+	struct msm_kms *kms;
+	struct drm_encoder *encoder;
+
+	/*
+	 * The drm_device pointer is assigned to msm_dsi only in the
+	 * modeset_init path. If mipi_dsi_attach() happens in the DSI driver's
+	 * probe path (generally when we're connected to a drm_panel of the
+	 * type mipi_dsi_device), the pointer will be NULL. In such cases, try
+	 * to set the encoder mode in the DSI connector's detect() op instead.
+	 */
+	if (!dev)
+		return;
+
+	priv = dev->dev_private;
+	kms = priv->kms;
+	encoder = msm_dsi_get_encoder(msm_dsi);
+
+	if (encoder && kms->funcs->set_encoder_mode)
+		if (!(device_flags & MIPI_DSI_MODE_VIDEO))
+			kms->funcs->set_encoder_mode(kms, encoder, true);
+}
+
 int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
 {
 	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
@@ -811,7 +877,7 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
 		goto fail;
 	}
 
-	ret = dsi_mgr_host_register(id);
+	ret = dsi_mgr_setup_components(id);
 	if (ret) {
 		pr_err("%s: failed to register mipi dsi host for DSI %d\n",
 			__func__, id);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index f39386ed75e4..0c2eb9c9a1fc 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -54,8 +54,10 @@ static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
 }
 
 int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
-	const unsigned long bit_rate, const unsigned long esc_rate)
+			     struct msm_dsi_phy_clk_request *clk_req)
 {
+	const unsigned long bit_rate = clk_req->bitclk_rate;
+	const unsigned long esc_rate = clk_req->escclk_rate;
 	s32 ui, lpx;
 	s32 tmax, tmin;
 	s32 pcnt0 = 10;
@@ -115,8 +117,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
 	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
 	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
 	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
-	timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);
-
+	timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0,
+						       false);
 	tmax = 63;
 	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
 	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
@@ -124,17 +126,21 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
 	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
 	if (tmin > tmax) {
 		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
-		timing->clk_pre = temp >> 1;
+		timing->shared_timings.clk_pre = temp >> 1;
+		timing->shared_timings.clk_pre_inc_by_2 = true;
 	} else {
-		timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
+		timing->shared_timings.clk_pre =
+				linear_inter(tmax, tmin, pcnt2, 0, false);
+		timing->shared_timings.clk_pre_inc_by_2 = false;
 	}
 
 	timing->ta_go = 3;
 	timing->ta_sure = 0;
 	timing->ta_get = 4;
 
-	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
-		timing->clk_pre, timing->clk_post, timing->clk_zero,
+	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+		timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+		timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
 		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
 		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
 		timing->hs_rqst);
@@ -142,6 +148,123 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
 	return 0;
 }
 
+int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
+				struct msm_dsi_phy_clk_request *clk_req)
+{
+	const unsigned long bit_rate = clk_req->bitclk_rate;
+	const unsigned long esc_rate = clk_req->escclk_rate;
+	s32 ui, ui_x8, lpx;
+	s32 tmax, tmin;
+	s32 pcnt0 = 50;
+	s32 pcnt1 = 50;
+	s32 pcnt2 = 10;
+	s32 pcnt3 = 30;
+	s32 pcnt4 = 10;
+	s32 pcnt5 = 2;
+	s32 coeff = 1000; /* Precision, should avoid overflow */
+	s32 hb_en, hb_en_ckln, pd_ckln, pd;
+	s32 val, val_ckln;
+	s32 temp;
+
+	if (!bit_rate || !esc_rate)
+		return -EINVAL;
+
+	timing->hs_halfbyte_en = 0;
+	hb_en = 0;
+	timing->hs_halfbyte_en_ckln = 0;
+	hb_en_ckln = 0;
+	timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3;
+	pd_ckln = timing->hs_prep_dly_ckln;
+	timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1;
+	pd = timing->hs_prep_dly;
+
+	val = (hb_en << 2) + (pd << 1);
+	val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1);
+
+	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+	ui_x8 = ui << 3;
+	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+	temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
+	tmin = max_t(s32, temp, 0);
+	temp = (95 * coeff - val_ckln * ui) / ui_x8;
+	tmax = max_t(s32, temp, 0);
+	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
+
+	temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui;
+	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
+	tmax = (tmin > 255) ? 511 : 255;
+	timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
+
+	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = (temp + 3 * ui) / ui_x8;
+	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8);
+	tmin = max_t(s32, temp, 0);
+	temp = (85 * coeff + 6 * ui - val * ui) / ui_x8;
+	tmax = max_t(s32, temp, 0);
+	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
+
+	temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui;
+	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
+	tmax = 255;
+	timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
+
+	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8);
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = (temp + 3 * ui) / ui_x8;
+	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+	tmax = 255;
+	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+	temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
+	timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
+
+	temp = 60 * coeff + 52 * ui - 43 * ui;
+	tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
+	tmax = 63;
+	timing->shared_timings.clk_post =
+				linear_inter(tmax, tmin, pcnt2, 0, false);
+
+	temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui;
+	temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui;
+	temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
+				(((timing->hs_rqst_ckln << 3) + 8) * ui);
+	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+	tmax = 63;
+	if (tmin > tmax) {
+		temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
+		timing->shared_timings.clk_pre = temp >> 1;
+		timing->shared_timings.clk_pre_inc_by_2 = true;
+	} else {
+		timing->shared_timings.clk_pre =
+				linear_inter(tmax, tmin, pcnt2, 0, false);
+		timing->shared_timings.clk_pre_inc_by_2 = false;
+	}
+
+	timing->ta_go = 3;
+	timing->ta_sure = 0;
+	timing->ta_get = 4;
+
+	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+	    timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
+	    timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
+	    timing->hs_prep_dly_ckln);
+
+	return 0;
+}
+
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
 				u32 bit_mask)
 {
@@ -268,6 +391,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
 	{ .compatible = "qcom,dsi-phy-28nm-8960",
 	  .data = &dsi_phy_28nm_8960_cfgs },
 #endif
+#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
+	{ .compatible = "qcom,dsi-phy-14nm",
+	  .data = &dsi_phy_14nm_cfgs },
+#endif
 	{}
 };
 
@@ -295,6 +422,21 @@ static int dsi_phy_get_id(struct msm_dsi_phy *phy)
 	return -EINVAL;
 }
 
+int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
+{
+	struct platform_device *pdev = phy->pdev;
+
+	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
+				"DSI_PHY_REG");
+	if (IS_ERR(phy->reg_base)) {
+		dev_err(&pdev->dev, "%s: failed to map phy regulator base\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 static int dsi_phy_driver_probe(struct platform_device *pdev)
 {
 	struct msm_dsi_phy *phy;
@@ -331,15 +476,6 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
 		goto fail;
 	}
 
-	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
-				"DSI_PHY_REG");
-	if (IS_ERR(phy->reg_base)) {
-		dev_err(dev, "%s: failed to map phy regulator base\n",
-			__func__);
-		ret = -ENOMEM;
-		goto fail;
-	}
-
 	ret = dsi_phy_regulator_init(phy);
 	if (ret) {
 		dev_err(dev, "%s: failed to init regulator\n", __func__);
@@ -353,6 +489,12 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
 		goto fail;
 	}
 
+	if (phy->cfg->ops.init) {
+		ret = phy->cfg->ops.init(phy);
+		if (ret)
+			goto fail;
+	}
+
 	/* PLL init will call into clk_register which requires
 	 * register access, so we need to enable power and ahb clock.
 	 */
@@ -410,7 +552,7 @@ void __exit msm_dsi_phy_driver_unregister(void)
 }
 
 int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
-	const unsigned long bit_rate, const unsigned long esc_rate)
+			struct msm_dsi_phy_clk_request *clk_req)
 {
 	struct device *dev = &phy->pdev->dev;
 	int ret;
@@ -418,21 +560,52 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 	if (!phy || !phy->cfg->ops.enable)
 		return -EINVAL;
 
+	ret = dsi_phy_enable_resource(phy);
+	if (ret) {
+		dev_err(dev, "%s: resource enable failed, %d\n",
+			__func__, ret);
+		goto res_en_fail;
+	}
+
 	ret = dsi_phy_regulator_enable(phy);
 	if (ret) {
 		dev_err(dev, "%s: regulator enable failed, %d\n",
 			__func__, ret);
-		return ret;
+		goto reg_en_fail;
 	}
 
-	ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
+	ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
 	if (ret) {
 		dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
-		dsi_phy_regulator_disable(phy);
-		return ret;
+		goto phy_en_fail;
+	}
+
+	/*
+	 * Resetting the DSI PHY silently changes its PLL registers to their
+	 * reset state, which confuses the clock driver and results in a
+	 * wrong output rate on the link clocks. Restore the PLL state if its
+	 * PLL is being used as a clock source.
+	 */
+	if (phy->usecase != MSM_DSI_PHY_SLAVE) {
+		ret = msm_dsi_pll_restore_state(phy->pll);
+		if (ret) {
+			dev_err(dev, "%s: failed to restore pll state, %d\n",
+				__func__, ret);
+			goto pll_restor_fail;
+		}
 	}
 
 	return 0;
+
+pll_restor_fail:
+	if (phy->cfg->ops.disable)
+		phy->cfg->ops.disable(phy);
+phy_en_fail:
+	dsi_phy_regulator_disable(phy);
+reg_en_fail:
+	dsi_phy_disable_resource(phy);
+res_en_fail:
+	return ret;
 }
 
 void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
@@ -440,21 +613,21 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
 	if (!phy || !phy->cfg->ops.disable)
 		return;
 
+	/* Save PLL status if it is a clock source */
+	if (phy->usecase != MSM_DSI_PHY_SLAVE)
+		msm_dsi_pll_save_state(phy->pll);
+
 	phy->cfg->ops.disable(phy);
 
 	dsi_phy_regulator_disable(phy);
+	dsi_phy_disable_resource(phy);
 }
 
-void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
-					u32 *clk_pre, u32 *clk_post)
+void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
+			struct msm_dsi_phy_shared_timings *shared_timings)
 {
-	if (!phy)
-		return;
-
-	if (clk_pre)
-		*clk_pre = phy->timing.clk_pre;
-	if (clk_post)
-		*clk_post = phy->timing.clk_post;
+	memcpy(shared_timings, &phy->timing.shared_timings,
+	       sizeof(*shared_timings));
 }
 
 struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
@@ -465,3 +638,9 @@ struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
 	return phy->pll;
 }
 
+void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
+			     enum msm_dsi_phy_usecase uc)
+{
+	if (phy)
+		phy->usecase = uc;
+}
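The timing calculations above lean heavily on linear_inter(), which is defined
earlier in dsi_phy.c and not visible in this diff. A rough model of its
behavior, inferred from the call sites here; treat it as an approximation
rather than the exact implementation:

    /* Pick a value pcnt percent of the way from tmin toward tmax,
     * clamped below at min_result; 'even' forces an even result.
     */
    static s32 linear_inter_model(s32 tmax, s32 tmin, s32 pcnt,
                                  s32 min_result, bool even)
    {
            s32 v = tmin + DIV_ROUND_UP((tmax - tmin) * pcnt, 100);

            if (even && (v & 1))
                    v--;
            return max_t(s32, min_result, v);
    }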
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index f24a85439b94..1733f6608a09 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -22,8 +22,9 @@
 #define dsi_phy_write(offset, data) msm_writel((data), (offset))
 
 struct msm_dsi_phy_ops {
+	int (*init) (struct msm_dsi_phy *phy);
 	int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
-		const unsigned long bit_rate, const unsigned long esc_rate);
+			struct msm_dsi_phy_clk_request *clk_req);
 	void (*disable)(struct msm_dsi_phy *phy);
 };
 
@@ -46,6 +47,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
 
 struct msm_dsi_dphy_timing {
 	u32 clk_pre;
@@ -61,12 +63,22 @@ struct msm_dsi_dphy_timing {
 	u32 ta_go;
 	u32 ta_sure;
 	u32 ta_get;
+
+	struct msm_dsi_phy_shared_timings shared_timings;
+
+	/* For PHY v2 only */
+	u32 hs_rqst_ckln;
+	u32 hs_prep_dly;
+	u32 hs_prep_dly_ckln;
+	u8 hs_halfbyte_en;
+	u8 hs_halfbyte_en_ckln;
 };
 
 struct msm_dsi_phy {
 	struct platform_device *pdev;
 	void __iomem *base;
 	void __iomem *reg_base;
+	void __iomem *lane_base;
 	int id;
 
 	struct clk *ahb_clk;
@@ -75,6 +87,7 @@ struct msm_dsi_phy {
 	struct msm_dsi_dphy_timing timing;
 	const struct msm_dsi_phy_cfg *cfg;
 
+	enum msm_dsi_phy_usecase usecase;
 	bool regulator_ldo_mode;
 
 	struct msm_dsi_pll *pll;
@@ -84,9 +97,12 @@ struct msm_dsi_phy {
  * PHY internal functions
  */
 int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
-	const unsigned long bit_rate, const unsigned long esc_rate);
+			     struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
+				struct msm_dsi_phy_clk_request *clk_req);
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
 				u32 bit_mask);
+int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
 
 #endif /* __DSI_PHY_H__ */
 
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
new file mode 100644
index 000000000000..513f4234adc1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+#define PHY_14NM_CKLN_IDX	4
+
+static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy,
+				     struct msm_dsi_dphy_timing *timing,
+				     int lane_idx)
+{
+	void __iomem *base = phy->lane_base;
+	bool clk_ln = (lane_idx == PHY_14NM_CKLN_IDX);
+	u32 zero = clk_ln ? timing->clk_zero : timing->hs_zero;
+	u32 prepare = clk_ln ? timing->clk_prepare : timing->hs_prepare;
+	u32 trail = clk_ln ? timing->clk_trail : timing->hs_trail;
+	u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst;
+	u32 prep_dly = clk_ln ? timing->hs_prep_dly_ckln : timing->hs_prep_dly;
+	u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln :
+				   timing->hs_halfbyte_en;
+
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx),
+		      DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx),
+		      halfbyte_en ? DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0);
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+		      DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0));
+}
+
+static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+			       struct msm_dsi_phy_clk_request *clk_req)
+{
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	u32 data;
+	int i;
+	int ret;
+	void __iomem *base = phy->base;
+	void __iomem *lane_base = phy->lane_base;
+
+	if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
+		dev_err(&phy->pdev->dev,
+			"%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	data = 0x1c;
+	if (phy->usecase != MSM_DSI_PHY_STANDALONE)
+		data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32);
+	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
+
+	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0x1);
+
+	/* 4 data lanes + 1 clk lane configuration */
+	for (i = 0; i < 5; i++) {
+		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i),
+			      0x1d);
+
+		dsi_phy_write(lane_base +
+			      REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i), 0xff);
+		dsi_phy_write(lane_base +
+			      REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i),
+			      (i == PHY_14NM_CKLN_IDX) ? 0x00 : 0x06);
+
+		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG3(i),
+			      (i == PHY_14NM_CKLN_IDX) ? 0x8f : 0x0f);
+		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG2(i), 0x10);
+		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i),
+			      0);
+		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i),
+			      0x88);
+
+		dsi_14nm_dphy_set_timing(phy, timing, i);
+	}
+
+	/* Make sure the PLL is not started */
+	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+	wmb(); /* make sure everything is written before reset and enable */
+
+	/* reset digital block */
+	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x80);
+	wmb(); /* ensure reset is asserted */
+	udelay(100);
+	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00);
+
+	msm_dsi_phy_set_src_pll(phy, src_pll_id,
+				REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL,
+				DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+	ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+	if (ret) {
+		dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	/* Remove power down from PLL and all lanes */
+	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0xff);
+
+	return 0;
+}
+
+static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
+{
+	dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0);
+	dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0);
+
+	/* ensure that the phy is completely disabled */
+	wmb();
+}
+
+static int dsi_14nm_phy_init(struct msm_dsi_phy *phy)
+{
+	struct platform_device *pdev = phy->pdev;
+
+	phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
+				"DSI_PHY_LANE");
+	if (IS_ERR(phy->lane_base)) {
+		dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
+	.type = MSM_DSI_PHY_14NM,
+	.src_pll_truthtable = { {false, false}, {true, false} },
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vcca", 17000, 32},
+		},
+	},
+	.ops = {
+		.enable = dsi_14nm_phy_enable,
+		.disable = dsi_14nm_phy_disable,
+		.init = dsi_14nm_phy_init,
+	},
+	.io_start = { 0x994400, 0x996400 },
+	.num_dsi_phy = 2,
+};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index c757e2070cac..1ca6c69516f5 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -72,7 +72,7 @@ static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
 }
 
 static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
-		const unsigned long bit_rate, const unsigned long esc_rate)
+				struct msm_dsi_phy_clk_request *clk_req)
 {
 	struct msm_dsi_dphy_timing *timing = &phy->timing;
 	int i;
@@ -81,7 +81,7 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 
 	DBG("");
 
-	if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
 		dev_err(&phy->pdev->dev,
 			"%s: D-PHY timing calculation failed\n", __func__);
 		return -EINVAL;
@@ -145,6 +145,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
 	.ops = {
 		.enable = dsi_20nm_phy_enable,
 		.disable = dsi_20nm_phy_disable,
+		.init = msm_dsi_phy_init_common,
 	},
 	.io_start = { 0xfd998300, 0xfd9a0300 },
 	.num_dsi_phy = 2,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 63d7fba31380..4972b52cbe44 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -67,7 +67,7 @@ static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
 }
 
 static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
-		const unsigned long bit_rate, const unsigned long esc_rate)
+				struct msm_dsi_phy_clk_request *clk_req)
 {
 	struct msm_dsi_dphy_timing *timing = &phy->timing;
 	int i;
@@ -75,7 +75,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 
 	DBG("");
 
-	if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
 		dev_err(&phy->pdev->dev,
 			"%s: D-PHY timing calculation failed\n", __func__);
 		return -EINVAL;
@@ -144,6 +144,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
 	.ops = {
 		.enable = dsi_28nm_phy_enable,
 		.disable = dsi_28nm_phy_disable,
+		.init = msm_dsi_phy_init_common,
 	},
 	.io_start = { 0xfd922b00, 0xfd923100 },
 	.num_dsi_phy = 2,
@@ -161,6 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
 	.ops = {
 		.enable = dsi_28nm_phy_enable,
 		.disable = dsi_28nm_phy_disable,
+		.init = msm_dsi_phy_init_common,
 	},
 	.io_start = { 0x1a98500 },
 	.num_dsi_phy = 1,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 7bdb9de54968..398004463498 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -124,14 +124,14 @@ static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
 }
 
 static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
-		const unsigned long bit_rate, const unsigned long esc_rate)
+				struct msm_dsi_phy_clk_request *clk_req)
 {
 	struct msm_dsi_dphy_timing *timing = &phy->timing;
 	void __iomem *base = phy->base;
 
 	DBG("");
 
-	if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
 		dev_err(&phy->pdev->dev,
 			"%s: D-PHY timing calculation failed\n", __func__);
 		return -EINVAL;
@@ -191,6 +191,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
 	.ops = {
 		.enable = dsi_28nm_phy_enable,
 		.disable = dsi_28nm_phy_disable,
+		.init = msm_dsi_phy_init_common,
 	},
 	.io_start = { 0x4700300, 0x5800300 },
 	.num_dsi_phy = 2,
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 5cd438f91afe..bc289f5c9078 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -140,6 +140,15 @@ int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
 	return 0;
 }
 
+int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
+			    enum msm_dsi_phy_usecase uc)
+{
+	if (pll->set_usecase)
+		return pll->set_usecase(pll, uc);
+
+	return 0;
+}
+
 struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
 			enum msm_dsi_phy_type type, int id)
 {
@@ -154,6 +163,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
 	case MSM_DSI_PHY_28NM_8960:
 		pll = msm_dsi_pll_28nm_8960_init(pdev, id);
 		break;
+	case MSM_DSI_PHY_14NM:
+		pll = msm_dsi_pll_14nm_init(pdev, id);
+		break;
 	default:
 		pll = ERR_PTR(-ENXIO);
 		break;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 2cf1664723e8..f63e7ada74a8 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -41,6 +41,8 @@ struct msm_dsi_pll {
 	void (*destroy)(struct msm_dsi_pll *pll);
 	void (*save_state)(struct msm_dsi_pll *pll);
 	int (*restore_state)(struct msm_dsi_pll *pll);
+	int (*set_usecase)(struct msm_dsi_pll *pll,
+			   enum msm_dsi_phy_usecase uc);
 };
 
 #define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw)
@@ -104,5 +106,14 @@ static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(
 }
 #endif
 
+#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
+struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id);
+#else
+static inline struct msm_dsi_pll *
+msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif
 #endif /* __DSI_PLL_H__ */
 
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
new file mode 100644
index 000000000000..fe15aa64086f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
@@ -0,0 +1,1104 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 14nm - clock diagram (eg: DSI0):
+ *
+ *         dsi0n1_postdiv_clk
+ *                         |
+ *                         |
+ *                 +----+  |  +----+
+ *  dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte
+ *                 +----+  |  +----+
+ *                         |           dsi0n1_postdivby2_clk
+ *                         |   +----+  |
+ *                         o---| /2 |--o--|\
+ *                         |   +----+     | \   +----+
+ *                         |              |  |--| n2 |-- dsi0pll
+ *                         o--------------| /   +----+
+ *                                        |/
+ */
+
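As a quick sanity check on the diagram, the sketch below (a standalone user-space program, not part of the patch) derives the byte and pixel clock rates from a VCO rate; the 1.5 GHz VCO and the N1/N2 values are illustrative assumptions, not values taken from the driver.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t vco = 1500000000ULL;	/* assumed 1.5 GHz, within VCO_MIN/MAX_RATE */
	unsigned int n1 = 2, n2 = 5;	/* assumed N1/N2 post-divider settings */

	uint64_t byte_clk = vco / n1 / 8;	/* the n1 -> /8 path: dsi0pllbyte */
	uint64_t pixel_clk = vco / n1 / 2 / n2;	/* the n1 -> /2 -> n2 path: dsi0pll */

	printf("byte clk:  %llu Hz\n", (unsigned long long)byte_clk);
	printf("pixel clk: %llu Hz\n", (unsigned long long)pixel_clk);
	return 0;
}

With these inputs the byte clock comes out as VCO/16 and the pixel clock as VCO/20, matching the /N1, /8 and /N1, /2, /N2 paths in the diagram.
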
+#define POLL_MAX_READS			15
+#define POLL_TIMEOUT_US			1000
+
+#define NUM_PROVIDED_CLKS		2
+
+#define VCO_REF_CLK_RATE		19200000
+#define VCO_MIN_RATE			1300000000UL
+#define VCO_MAX_RATE			2600000000UL
+
+#define DSI_BYTE_PLL_CLK		0
+#define DSI_PIXEL_PLL_CLK		1
+
+#define DSI_PLL_DEFAULT_VCO_POSTDIV	1
+
+struct dsi_pll_input {
+	u32 fref;	/* reference clk */
+	u32 fdata;	/* bit clock rate */
+	u32 dsiclk_sel; /* Mux configuration (see diagram) */
+	u32 ssc_en;	/* SSC enable/disable */
+	u32 ldo_en;
+
+	/* fixed params */
+	u32 refclk_dbler_en;
+	u32 vco_measure_time;
+	u32 kvco_measure_time;
+	u32 bandgap_timer;
+	u32 pll_wakeup_timer;
+	u32 plllock_cnt;
+	u32 plllock_rng;
+	u32 ssc_center;
+	u32 ssc_adj_period;
+	u32 ssc_spread;
+	u32 ssc_freq;
+	u32 pll_ie_trim;
+	u32 pll_ip_trim;
+	u32 pll_iptat_trim;
+	u32 pll_cpcset_cur;
+	u32 pll_cpmset_cur;
+
+	u32 pll_icpmset;
+	u32 pll_icpcset;
+
+	u32 pll_icpmset_p;
+	u32 pll_icpmset_m;
+
+	u32 pll_icpcset_p;
+	u32 pll_icpcset_m;
+
+	u32 pll_lpf_res1;
+	u32 pll_lpf_cap1;
+	u32 pll_lpf_cap2;
+	u32 pll_c3ctrl;
+	u32 pll_r3ctrl;
+};
+
+struct dsi_pll_output {
+	u32 pll_txclk_en;
+	u32 dec_start;
+	u32 div_frac_start;
+	u32 ssc_period;
+	u32 ssc_step_size;
+	u32 plllock_cmp;
+	u32 pll_vco_div_ref;
+	u32 pll_vco_count;
+	u32 pll_kvco_div_ref;
+	u32 pll_kvco_count;
+	u32 pll_misc1;
+	u32 pll_lpf2_postdiv;
+	u32 pll_resetsm_cntrl;
+	u32 pll_resetsm_cntrl2;
+	u32 pll_resetsm_cntrl5;
+	u32 pll_kvco_code;
+
+	u32 cmn_clk_cfg0;
+	u32 cmn_clk_cfg1;
+	u32 cmn_ldo_cntrl;
+
+	u32 pll_postdiv;
+	u32 fcvo;
+};
+
+struct pll_14nm_cached_state {
+	unsigned long vco_rate;
+	u8 n2postdiv;
+	u8 n1postdiv;
+};
+
+struct dsi_pll_14nm {
+	struct msm_dsi_pll base;
+
+	int id;
+	struct platform_device *pdev;
+
+	void __iomem *phy_cmn_mmio;
+	void __iomem *mmio;
+
+	int vco_delay;
+
+	struct dsi_pll_input in;
+	struct dsi_pll_output out;
+
+	/* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */
+	spinlock_t postdiv_lock;
+
+	u64 vco_current_rate;
+	u64 vco_ref_clk_rate;
+
+	/* private clocks: */
+	struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];
+	u32 num_hws;
+
+	/* clock-provider: */
+	struct clk_hw_onecell_data *hw_data;
+
+	struct pll_14nm_cached_state cached_state;
+
+	enum msm_dsi_phy_usecase uc;
+	struct dsi_pll_14nm *slave;
+};
+
+#define to_pll_14nm(x)	container_of(x, struct dsi_pll_14nm, base)
+
+/*
+ * Private struct for the N1/N2 post-divider clocks. These clocks are similar
+ * to the generic clk_divider class of clocks, except that they also set the
+ * slave DSI PLL's post-dividers when running in dual DSI mode.
+ */
+struct dsi_pll_14nm_postdiv {
+	struct clk_hw hw;
+
+	/* divider params */
+	u8 shift;
+	u8 width;
+	u8 flags; /* same flags as used by clk_divider struct */
+
+	struct dsi_pll_14nm *pll;
+};
+
+#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
+
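The to_pll_14nm_postdiv() macro is the usual container_of() pattern: the clk framework hands the driver back the embedded clk_hw, and the wrapper struct is recovered from it by pointer arithmetic. A minimal standalone illustration (simplified stand-in types, not the kernel definitions):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct clk_hw { int placeholder; };	/* stand-in for the real struct */

struct postdiv_demo {
	struct clk_hw hw;
	unsigned char shift, width;
};

int main(void)
{
	struct postdiv_demo pd = { .shift = 4, .width = 4 };
	struct clk_hw *hw = &pd.hw;	/* what the clk framework passes back */

	struct postdiv_demo *back = container_of(hw, struct postdiv_demo, hw);

	printf("shift=%u width=%u\n", (unsigned)back->shift, (unsigned)back->width);
	return 0;
}
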
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data
+ */
+static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
+
+static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
+				    u32 nb_tries, u32 timeout_us)
+{
+	bool pll_locked = false;
+	void __iomem *base = pll_14nm->mmio;
+	u32 tries, val;
+
+	tries = nb_tries;
+	while (tries--) {
+		val = pll_read(base +
+			       REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+		pll_locked = !!(val & BIT(5));
+
+		if (pll_locked)
+			break;
+
+		udelay(timeout_us);
+	}
+
+	if (!pll_locked) {
+		tries = nb_tries;
+		while (tries--) {
+			val = pll_read(base +
+				REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+			pll_locked = !!(val & BIT(0));
+
+			if (pll_locked)
+				break;
+
+			udelay(timeout_us);
+		}
+	}
+
+	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+	return pll_locked;
+}
+
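The two-stage poll above first watches the lock bit (BIT(5)) and only then falls back to the ready bit (BIT(0)). A standalone sketch of that control flow, with a stubbed-out status read standing in for the RESET_SM_READY_STATUS register:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t read_status(void)
{
	return 1u << 5;		/* pretend the PLL locked on the first read */
}

static bool poll_bit(uint32_t bit, unsigned int tries)
{
	while (tries--) {
		if (read_status() & bit)
			return true;
		/* the driver udelay()s between reads here */
	}
	return false;
}

int main(void)
{
	/* lock bit first, ready bit as the fallback */
	bool locked = poll_bit(1u << 5, 15) || poll_bit(1u << 0, 15);

	printf("DSI PLL is %slocked\n", locked ? "" : "*not* ");
	return 0;
}
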
+static void dsi_pll_14nm_input_init(struct dsi_pll_14nm *pll)
+{
+	pll->in.fref = pll->vco_ref_clk_rate;
+	pll->in.fdata = 0;
+	pll->in.dsiclk_sel = 1;	/* Use the /2 path in Mux */
+	pll->in.ldo_en = 0;	/* disabled for now */
+
+	/* fixed input */
+	pll->in.refclk_dbler_en = 0;
+	pll->in.vco_measure_time = 5;
+	pll->in.kvco_measure_time = 5;
+	pll->in.bandgap_timer = 4;
+	pll->in.pll_wakeup_timer = 5;
+	pll->in.plllock_cnt = 1;
+	pll->in.plllock_rng = 0;
+
+	/*
+	 * SSC is enabled by default. We might need DT props for configuring
+	 * some SSC params, such as PPM and center/down spread.
+	 */
+	pll->in.ssc_en = 1;
+	pll->in.ssc_center = 0;		/* down spread by default */
+	pll->in.ssc_spread = 5;		/* PPM / 1000 */
+	pll->in.ssc_freq = 31500;	/* default recommended */
+	pll->in.ssc_adj_period = 37;
+
+	pll->in.pll_ie_trim = 4;
+	pll->in.pll_ip_trim = 4;
+	pll->in.pll_cpcset_cur = 1;
+	pll->in.pll_cpmset_cur = 1;
+	pll->in.pll_icpmset = 4;
+	pll->in.pll_icpcset = 4;
+	pll->in.pll_icpmset_p = 0;
+	pll->in.pll_icpmset_m = 0;
+	pll->in.pll_icpcset_p = 0;
+	pll->in.pll_icpcset_m = 0;
+	pll->in.pll_lpf_res1 = 3;
+	pll->in.pll_lpf_cap1 = 11;
+	pll->in.pll_lpf_cap2 = 1;
+	pll->in.pll_iptat_trim = 7;
+	pll->in.pll_c3ctrl = 2;
+	pll->in.pll_r3ctrl = 1;
+}
+
+#define CEIL(x, y)		(((x) + ((y) - 1)) / (y))
+
+static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll)
+{
+	u32 period, ssc_period;
+	u32 ref, rem;
+	u64 step_size;
+
+	DBG("vco=%lld ref=%lld", pll->vco_current_rate, pll->vco_ref_clk_rate);
+
+	ssc_period = pll->in.ssc_freq / 500;
+	period = (u32)pll->vco_ref_clk_rate / 1000;
+	ssc_period  = CEIL(period, ssc_period);
+	ssc_period -= 1;
+	pll->out.ssc_period = ssc_period;
+
+	DBG("ssc freq=%d spread=%d period=%d", pll->in.ssc_freq,
+	    pll->in.ssc_spread, pll->out.ssc_period);
+
+	step_size = (u32)pll->vco_current_rate;
+	ref = pll->vco_ref_clk_rate;
+	ref /= 1000;
+	step_size = div_u64(step_size, ref);
+	step_size <<= 20;
+	step_size = div_u64(step_size, 1000);
+	step_size *= pll->in.ssc_spread;
+	step_size = div_u64(step_size, 1000);
+	step_size *= (pll->in.ssc_adj_period + 1);
+
+	rem = 0;
+	step_size = div_u64_rem(step_size, ssc_period + 1, &rem);
+	if (rem)
+		step_size++;
+
+	DBG("step_size=%lld", step_size);
+
+	step_size &= 0x0ffff;	/* take lower 16 bits */
+
+	pll->out.ssc_step_size = step_size;
+}
+
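Condensed into plain arithmetic, the SSC computation above reduces to a few integer steps. A standalone sketch using the driver's defaults (spread 5/1000, 31.5 kHz modulation, adj_period 37) and an assumed 1.5 GHz VCO:

#include <stdio.h>
#include <stdint.h>

#define CEIL(x, y)	(((x) + ((y) - 1)) / (y))

int main(void)
{
	uint64_t vco = 1500000000ULL;	/* assumed VCO rate */
	uint64_t fref = 19200000ULL;	/* VCO_REF_CLK_RATE */
	uint32_t ssc_freq = 31500, ssc_spread = 5, ssc_adj_period = 37;

	uint32_t ssc_period = CEIL(fref / 1000, ssc_freq / 500) - 1;

	uint64_t step = vco / (fref / 1000);	/* VCO cycles per ref kHz */
	step = (step << 20) / 1000 * ssc_spread / 1000;
	step *= ssc_adj_period + 1;
	step = CEIL(step, (uint64_t)ssc_period + 1) & 0xffff; /* round up, low 16 bits */

	printf("ssc_period=%u step_size=%llu\n",
	       ssc_period, (unsigned long long)step);
	return 0;
}
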
+static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll)
+{
+	struct dsi_pll_input *pin = &pll->in;
+	struct dsi_pll_output *pout = &pll->out;
+	u64 multiplier = BIT(20);
+	u64 dec_start_multiple, dec_start, pll_comp_val;
+	u32 duration, div_frac_start;
+	u64 vco_clk_rate = pll->vco_current_rate;
+	u64 fref = pll->vco_ref_clk_rate;
+
+	DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);
+
+	dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);
+	div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
+
+	dec_start = div_u64(dec_start_multiple, multiplier);
+
+	pout->dec_start = (u32)dec_start;
+	pout->div_frac_start = div_frac_start;
+
+	if (pin->plllock_cnt == 0)
+		duration = 1024;
+	else if (pin->plllock_cnt == 1)
+		duration = 256;
+	else if (pin->plllock_cnt == 2)
+		duration = 128;
+	else
+		duration = 32;
+
+	pll_comp_val = duration * dec_start_multiple;
+	pll_comp_val = div_u64(pll_comp_val, multiplier);
+	do_div(pll_comp_val, 10);
+
+	pout->plllock_cmp = (u32)pll_comp_val;
+
+	pout->pll_txclk_en = 1;
+	pout->cmn_ldo_cntrl = 0x3c;
+}
+
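The decimal/fractional split is easiest to see with concrete numbers: for an assumed 1.5 GHz VCO and the 19.2 MHz reference, vco/fref = 78.125, so dec_start is 78 and div_frac_start is 0.125 * 2^20 = 0x20000. A standalone check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t vco = 1500000000ULL;	/* assumed rate */
	uint64_t fref = 19200000ULL;	/* VCO_REF_CLK_RATE */
	uint64_t multiplier = 1ULL << 20;

	uint64_t dec_start_multiple = vco * multiplier / fref;
	uint32_t dec_start = dec_start_multiple / multiplier;	   /* 78 */
	uint32_t div_frac_start = dec_start_multiple % multiplier; /* 0x20000 */

	printf("dec_start=%u div_frac_start=0x%x\n",
	       dec_start, (unsigned)div_frac_start);
	return 0;
}
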
+static u32 pll_14nm_kvco_slop(u32 vrate)
+{
+	u32 slop = 0;
+
+	if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL)
+		slop =  600;
+	else if (vrate > 1800000000UL && vrate < 2300000000UL)
+		slop = 400;
+	else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE)
+		slop = 280;
+
+	return slop;
+}
+
+static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll)
+{
+	struct dsi_pll_input *pin = &pll->in;
+	struct dsi_pll_output *pout = &pll->out;
+	u64 vco_clk_rate = pll->vco_current_rate;
+	u64 fref = pll->vco_ref_clk_rate;
+	u64 data;
+	u32 cnt;
+
+	data = fref * pin->vco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 2;
+	pout->pll_vco_div_ref = data;
+
+	data = div_u64(vco_clk_rate, 1000000);	/* unit is MHz */
+	data *= pin->vco_measure_time;
+	do_div(data, 10);
+	pout->pll_vco_count = data;
+
+	data = fref * pin->kvco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 1;
+	pout->pll_kvco_div_ref = data;
+
+	cnt = pll_14nm_kvco_slop(vco_clk_rate);
+	cnt *= 2;
+	cnt /= 100;
+	cnt *= pin->kvco_measure_time;
+	pout->pll_kvco_count = cnt;
+
+	pout->pll_misc1 = 16;
+	pout->pll_resetsm_cntrl = 48;
+	pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;
+	pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;
+	pout->pll_kvco_code = 0;
+}
+
+static void pll_db_commit_ssc(struct dsi_pll_14nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	struct dsi_pll_input *pin = &pll->in;
+	struct dsi_pll_output *pout = &pll->out;
+	u8 data;
+
+	data = pin->ssc_adj_period;
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);
+	data = (pin->ssc_adj_period >> 8);
+	data &= 0x03;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);
+
+	data = pout->ssc_period;
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);
+	data = (pout->ssc_period >> 8);
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);
+
+	data = pout->ssc_step_size;
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);
+	data = (pout->ssc_step_size >> 8);
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);
+
+	data = (pin->ssc_center & 0x01);
+	data <<= 1;
+	data |= 0x01; /* enable */
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);
+
+	wmb();	/* make sure register committed */
+}
+
+static void pll_db_commit_common(struct dsi_pll_14nm *pll,
+				 struct dsi_pll_input *pin,
+				 struct dsi_pll_output *pout)
+{
+	void __iomem *base = pll->mmio;
+	u8 data;
+
+	/* configure the non-frequency-dependent PLL registers */
+	data = 0;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);
+
+	data = pout->pll_txclk_en;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, data);
+
+	data = pout->pll_resetsm_cntrl;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, data);
+	data = pout->pll_resetsm_cntrl2;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, data);
+	data = pout->pll_resetsm_cntrl5;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, data);
+
+	data = pout->pll_vco_div_ref & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);
+	data = (pout->pll_vco_div_ref >> 8) & 0x3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);
+
+	data = pout->pll_kvco_div_ref & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);
+	data = (pout->pll_kvco_div_ref >> 8) & 0x3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);
+
+	data = pout->pll_misc1;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, data);
+
+	data = pin->pll_ie_trim;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, data);
+
+	data = pin->pll_ip_trim;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, data);
+
+	data = pin->pll_cpmset_cur << 3 | pin->pll_cpcset_cur;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, data);
+
+	data = pin->pll_icpcset_p << 3 | pin->pll_icpcset_m;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, data);
+
+	data = pin->pll_icpmset_p << 3 | pin->pll_icpmset_m;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, data);
+
+	data = pin->pll_icpmset << 3 | pin->pll_icpcset;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, data);
+
+	data = pin->pll_lpf_cap2 << 4 | pin->pll_lpf_cap1;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, data);
+
+	data = pin->pll_iptat_trim;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, data);
+
+	data = pin->pll_c3ctrl | pin->pll_r3ctrl << 4;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, data);
+}
+
+static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)
+{
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+
+	/* de-assert PLL start and apply PLL SW reset */
+
+	/* stop pll */
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+
+	/* pll sw reset */
+	pll_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);
+	wmb();	/* make sure register committed */
+
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);
+	wmb();	/* make sure register committed */
+}
+
+static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,
+			       struct dsi_pll_input *pin,
+			       struct dsi_pll_output *pout)
+{
+	void __iomem *base = pll->mmio;
+	void __iomem *cmn_base = pll->phy_cmn_mmio;
+	u8 data;
+
+	DBG("DSI%d PLL", pll->id);
+
+	data = pout->cmn_ldo_cntrl;
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
+
+	pll_db_commit_common(pll, pin, pout);
+
+	pll_14nm_software_reset(pll);
+
+	data = pin->dsiclk_sel; /* set dsiclk_sel = 1  */
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, data);
+
+	data = 0xff; /* data, clk, pll normal operation */
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);
+
+	/* configure the frequency dependent pll registers */
+	data = pout->dec_start;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);
+
+	data = pout->div_frac_start & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);
+	data = (pout->div_frac_start >> 8) & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);
+	data = (pout->div_frac_start >> 16) & 0xf;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);
+
+	data = pout->plllock_cmp & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);
+
+	data = (pout->plllock_cmp >> 8) & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);
+
+	data = (pout->plllock_cmp >> 16) & 0x3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);
+
+	data = pin->plllock_cnt << 1 | pin->plllock_rng << 3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);
+
+	data = pout->pll_vco_count & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);
+	data = (pout->pll_vco_count >> 8) & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);
+
+	data = pout->pll_kvco_count & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);
+	data = (pout->pll_kvco_count >> 8) & 0x3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);
+
+	data = (pout->pll_postdiv - 1) << 4 | pin->pll_lpf_res1;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, data);
+
+	if (pin->ssc_en)
+		pll_db_commit_ssc(pll);
+
+	wmb();	/* make sure register committed */
+}
+
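div_frac_start is a 20-bit word programmed 8+8+4 across the three DIV_FRAC_START registers. A standalone sketch of the split, reusing the 0x20000 value from the worked example above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t div_frac_start = 0x20000;	/* from the worked example above */

	printf("DIV_FRAC_START1=0x%02x\n", (unsigned)(div_frac_start & 0xff));
	printf("DIV_FRAC_START2=0x%02x\n", (unsigned)((div_frac_start >> 8) & 0xff));
	printf("DIV_FRAC_START3=0x%02x\n", (unsigned)((div_frac_start >> 16) & 0xf));
	return 0;
}
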
+/*
+ * VCO clock Callbacks
+ */
+static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+				     unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct dsi_pll_input *pin = &pll_14nm->in;
+	struct dsi_pll_output *pout = &pll_14nm->out;
+
+	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->id, rate,
+	    parent_rate);
+
+	pll_14nm->vco_current_rate = rate;
+	pll_14nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
+
+	dsi_pll_14nm_input_init(pll_14nm);
+
+	/*
+	 * This configures the post divider internal to the VCO. It's
+	 * fixed to divide by 1 for now.
+	 *
+	 * tx_band = pll_postdiv.
+	 * 0: divided by 1
+	 * 1: divided by 2
+	 * 2: divided by 4
+	 * 3: divided by 8
+	 */
+	pout->pll_postdiv = DSI_PLL_DEFAULT_VCO_POSTDIV;
+
+	pll_14nm_dec_frac_calc(pll_14nm);
+
+	if (pin->ssc_en)
+		pll_14nm_ssc_calc(pll_14nm);
+
+	pll_14nm_calc_vco_count(pll_14nm);
+
+	/* commit the slave DSI PLL registers if we're master. Note that we
+	 * don't lock the slave PLL. We just ensure that the PLL/PHY registers
+	 * of the master and slave are identical
+	 */
+	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
+		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+
+		pll_db_commit_14nm(pll_14nm_slave, pin, pout);
+	}
+
+	pll_db_commit_14nm(pll_14nm, pin, pout);
+
+	return 0;
+}
+
+static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,
+						  unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	void __iomem *base = pll_14nm->mmio;
+	u64 vco_rate, multiplier = BIT(20);
+	u32 div_frac_start;
+	u32 dec_start;
+	u64 ref_clk = parent_rate;
+
+	dec_start = pll_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);
+	dec_start &= 0x0ff;
+
+	DBG("dec_start = %x", dec_start);
+
+	div_frac_start = (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
+				& 0xf) << 16;
+	div_frac_start |= (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
+				& 0xff) << 8;
+	div_frac_start |= pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
+				& 0xff;
+
+	DBG("div_frac_start = %x", div_frac_start);
+
+	vco_rate = ref_clk * dec_start;
+
+	vco_rate += ((ref_clk * div_frac_start) / multiplier);
+
+	/*
+	 * Recalculating the rate from dec_start and div_frac_start doesn't
+	 * end up at exactly the rate we originally set. Convert the freq to
+	 * kHz, round it up, and convert it back to Hz.
+	 */
+	vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000;
+
+	DBG("returning vco rate = %lu", (unsigned long)vco_rate);
+
+	return (unsigned long)vco_rate;
+}
+
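Running the worked example above in reverse shows why the kHz round-up is enough here: 78 and 0x20000 reconstruct exactly 1.5 GHz. A standalone check, with DIV_ROUND_UP_ULL redefined locally:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP_ULL(x, y)	(((x) + (y) - 1) / (y))

int main(void)
{
	uint64_t ref = 19200000ULL;	/* parent (reference) rate */
	uint32_t dec_start = 78;	/* values from the worked example above */
	uint32_t div_frac_start = 0x20000;

	uint64_t vco = ref * dec_start + ((ref * div_frac_start) >> 20);

	vco = DIV_ROUND_UP_ULL(vco, 1000) * 1000;	/* round up to 1 kHz */
	printf("vco = %llu Hz\n", (unsigned long long)vco);	/* 1500000000 */
	return 0;
}
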
+static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
+	.round_rate = msm_dsi_pll_helper_clk_round_rate,
+	.set_rate = dsi_pll_14nm_vco_set_rate,
+	.recalc_rate = dsi_pll_14nm_vco_recalc_rate,
+	.prepare = msm_dsi_pll_helper_clk_prepare,
+	.unprepare = msm_dsi_pll_helper_clk_unprepare,
+};
+
+/*
+ * N1 and N2 post-divider clock callbacks
+ */
+#define div_mask(width)	((1 << (width)) - 1)
+static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
+						      unsigned long parent_rate)
+{
+	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+	struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+	void __iomem *base = pll_14nm->phy_cmn_mmio;
+	u8 shift = postdiv->shift;
+	u8 width = postdiv->width;
+	u32 val;
+
+	DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, parent_rate);
+
+	val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;
+	val &= div_mask(width);
+
+	return divider_recalc_rate(hw, parent_rate, val, NULL,
+				   postdiv->flags);
+}
+
+static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
+					    unsigned long rate,
+					    unsigned long *prate)
+{
+	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+	struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+
+	DBG("DSI%d PLL rate=%lu", pll_14nm->id, rate);
+
+	return divider_round_rate(hw, rate, prate, NULL,
+				  postdiv->width,
+				  postdiv->flags);
+}
+
+static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+					 unsigned long parent_rate)
+{
+	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+	struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+	void __iomem *base = pll_14nm->phy_cmn_mmio;
+	spinlock_t *lock = &pll_14nm->postdiv_lock;
+	u8 shift = postdiv->shift;
+	u8 width = postdiv->width;
+	unsigned int value;
+	unsigned long flags = 0;
+	u32 val;
+
+	DBG("DSI%d PLL rate=%lu parent rate=%lu", pll_14nm->id, rate,
+	    parent_rate);
+
+	value = divider_get_val(rate, parent_rate, NULL, postdiv->width,
+				postdiv->flags);
+
+	spin_lock_irqsave(lock, flags);
+
+	val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+	val &= ~(div_mask(width) << shift);
+
+	val |= value << shift;
+	pll_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+
+	/* If we're master in dual DSI mode, then the slave PLL's post-dividers
+	 * follow the master's post-dividers
+	 */
+	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
+		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+		void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;
+
+		pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+	}
+
+	spin_unlock_irqrestore(lock, flags);
+
+	return 0;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
+	.recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
+	.round_rate = dsi_pll_14nm_postdiv_round_rate,
+	.set_rate = dsi_pll_14nm_postdiv_set_rate,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	void __iomem *base = pll_14nm->mmio;
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+	bool locked;
+
+	DBG("");
+
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
+
+	locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,
+					 POLL_TIMEOUT_US);
+
+	if (unlikely(!locked))
+		dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
+	else
+		DBG("DSI PLL lock success");
+
+	return locked ? 0 : -EINVAL;
+}
+
+static void dsi_pll_14nm_disable_seq(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+
+	DBG("");
+
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+}
+
+static void dsi_pll_14nm_save_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+	u32 data;
+
+	data = pll_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+
+	cached_state->n1postdiv = data & 0xf;
+	cached_state->n2postdiv = (data >> 4) & 0xf;
+
+	DBG("DSI%d PLL save state %x %x", pll_14nm->id,
+	    cached_state->n1postdiv, cached_state->n2postdiv);
+
+	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
+}
+
+static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+	u32 data;
+	int ret;
+
+	ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
+					cached_state->vco_rate, 0);
+	if (ret) {
+		dev_err(&pll_14nm->pdev->dev,
+			"restore vco rate failed. ret=%d\n", ret);
+		return ret;
+	}
+
+	data = cached_state->n1postdiv | (cached_state->n2postdiv << 4);
+
+	DBG("DSI%d PLL restore state %x %x", pll_14nm->id,
+	    cached_state->n1postdiv, cached_state->n2postdiv);
+
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+
+	/* also restore post-dividers for slave DSI PLL */
+	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
+		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+		void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;
+
+		pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+	}
+
+	return 0;
+}
+
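Save and restore both treat CLK_CFG0 as two 4-bit fields: N1 in bits 0-3, N2 in bits 4-7. A standalone sketch of the packing, with assumed divider values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t n1 = 2, n2 = 5;			/* assumed divider values */
	uint32_t data = n1 | (n2 << 4);		/* restore path */

	printf("CLK_CFG0=0x%02x n1=%u n2=%u\n",	/* save path unpacks it */
	       (unsigned)data, (unsigned)(data & 0xf),
	       (unsigned)((data >> 4) & 0xf));
	return 0;
}
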
+static int dsi_pll_14nm_set_usecase(struct msm_dsi_pll *pll,
+				    enum msm_dsi_phy_usecase uc)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	void __iomem *base = pll_14nm->mmio;
+	u32 clkbuflr_en, bandgap = 0;
+
+	switch (uc) {
+	case MSM_DSI_PHY_STANDALONE:
+		clkbuflr_en = 0x1;
+		break;
+	case MSM_DSI_PHY_MASTER:
+		clkbuflr_en = 0x3;
+		pll_14nm->slave = pll_14nm_list[(pll_14nm->id + 1) % DSI_MAX];
+		break;
+	case MSM_DSI_PHY_SLAVE:
+		clkbuflr_en = 0x0;
+		bandgap = 0x3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	pll_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);
+	if (bandgap)
+		pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);
+
+	pll_14nm->uc = uc;
+
+	return 0;
+}
+
+static int dsi_pll_14nm_get_provider(struct msm_dsi_pll *pll,
+				     struct clk **byte_clk_provider,
+				     struct clk **pixel_clk_provider)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct clk_hw_onecell_data *hw_data = pll_14nm->hw_data;
+
+	if (byte_clk_provider)
+		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
+	if (pixel_clk_provider)
+		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+	return 0;
+}
+
+static void dsi_pll_14nm_destroy(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct platform_device *pdev = pll_14nm->pdev;
+	int num_hws = pll_14nm->num_hws;
+
+	of_clk_del_provider(pdev->dev.of_node);
+
+	while (num_hws--)
+		clk_hw_unregister(pll_14nm->hws[num_hws]);
+}
+
+static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
+						const char *name,
+						const char *parent_name,
+						unsigned long flags,
+						u8 shift)
+{
+	struct dsi_pll_14nm_postdiv *pll_postdiv;
+	struct device *dev = &pll_14nm->pdev->dev;
+	struct clk_init_data postdiv_init = {
+		.parent_names = (const char *[]) { parent_name },
+		.num_parents = 1,
+		.name = name,
+		.flags = flags,
+		.ops = &clk_ops_dsi_pll_14nm_postdiv,
+	};
+	int ret;
+
+	pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL);
+	if (!pll_postdiv)
+		return ERR_PTR(-ENOMEM);
+
+	pll_postdiv->pll = pll_14nm;
+	pll_postdiv->shift = shift;
+	/* both N1 and N2 postdividers are 4 bits wide */
+	pll_postdiv->width = 4;
+	/* range of each divider is from 1 to 15 */
+	pll_postdiv->flags = CLK_DIVIDER_ONE_BASED;
+	pll_postdiv->hw.init = &postdiv_init;
+
+	ret = clk_hw_register(dev, &pll_postdiv->hw);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return &pll_postdiv->hw;
+}
+
+static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)
+{
+	char clk_name[32], parent[32], vco_name[32];
+	struct clk_init_data vco_init = {
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.name = vco_name,
+		.flags = CLK_IGNORE_UNUSED,
+		.ops = &clk_ops_dsi_pll_14nm_vco,
+	};
+	struct device *dev = &pll_14nm->pdev->dev;
+	struct clk_hw **hws = pll_14nm->hws;
+	struct clk_hw_onecell_data *hw_data;
+	struct clk_hw *hw;
+	int num = 0;
+	int ret;
+
+	DBG("DSI%d", pll_14nm->id);
+
+	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
+			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
+			       GFP_KERNEL);
+	if (!hw_data)
+		return -ENOMEM;
+
+	snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->id);
+	pll_14nm->base.clk_hw.init = &vco_init;
+
+	ret = clk_hw_register(dev, &pll_14nm->base.clk_hw);
+	if (ret)
+		return ret;
+
+	hws[num++] = &pll_14nm->base.clk_hw;
+
+	snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
+	snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->id);
+
+	/* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
+	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,
+				       CLK_SET_RATE_PARENT, 0);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
+	hws[num++] = hw;
+
+	snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->id);
+	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
+
+	/* DSI Byte clock = VCO_CLK / N1 / 8 */
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+					  CLK_SET_RATE_PARENT, 1, 8);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
+	hws[num++] = hw;
+	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
+
+	snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);
+	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
+
+	/*
+	 * Skip the mux for now; force DSICLK_SEL to 1 and add a /2 divider
+	 * along the way. Don't let it set the parent rate.
+	 */
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
+	hws[num++] = hw;
+
+	snprintf(clk_name, 32, "dsi%dpll", pll_14nm->id);
+	snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);
+
+	/* DSI pixel clock = VCO_CLK / N1 / 2 / N2
+	 * This is the output of the N2 post-divider, bits 4-7 in
+	 * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.
+	 */
+	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
+	hws[num++] = hw;
+	hw_data->hws[DSI_PIXEL_PLL_CLK]	= hw;
+
+	pll_14nm->num_hws = num;
+
+	hw_data->num = NUM_PROVIDED_CLKS;
+	pll_14nm->hw_data = hw_data;
+
+	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+				     pll_14nm->hw_data);
+	if (ret) {
+		dev_err(dev, "failed to register clk provider: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
+{
+	struct dsi_pll_14nm *pll_14nm;
+	struct msm_dsi_pll *pll;
+	int ret;
+
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL);
+	if (!pll_14nm)
+		return ERR_PTR(-ENOMEM);
+
+	DBG("PLL%d", id);
+
+	pll_14nm->pdev = pdev;
+	pll_14nm->id = id;
+	pll_14nm_list[id] = pll_14nm;
+
+	pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+	if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
+		dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+	if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
+		dev_err(&pdev->dev, "failed to map PLL base\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&pll_14nm->postdiv_lock);
+
+	pll = &pll_14nm->base;
+	pll->min_rate = VCO_MIN_RATE;
+	pll->max_rate = VCO_MAX_RATE;
+	pll->get_provider = dsi_pll_14nm_get_provider;
+	pll->destroy = dsi_pll_14nm_destroy;
+	pll->disable_seq = dsi_pll_14nm_disable_seq;
+	pll->save_state = dsi_pll_14nm_save_state;
+	pll->restore_state = dsi_pll_14nm_restore_state;
+	pll->set_usecase = dsi_pll_14nm_set_usecase;
+
+	pll_14nm->vco_delay = 1;
+
+	pll->en_seq_cnt = 1;
+	pll->enable_seqs[0] = dsi_pll_14nm_enable_seq;
+
+	ret = pll_14nm_register(pll_14nm);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	return pll;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index b782efd4b95f..94ea963519b2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -260,8 +260,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 	struct drm_encoder *encoder;
 	struct drm_connector *connector;
 	struct device_node *panel_node;
-	struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
-	int i, dsi_id;
+	int dsi_id;
 	int ret;
 
 	switch (intf_type) {
@@ -322,22 +321,19 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 		if (!priv->dsi[dsi_id])
 			break;
 
-		for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
-			dsi_encs[i] = mdp4_dsi_encoder_init(dev);
-			if (IS_ERR(dsi_encs[i])) {
-				ret = PTR_ERR(dsi_encs[i]);
-				dev_err(dev->dev,
-					"failed to construct DSI encoder: %d\n",
-					ret);
-				return ret;
-			}
-
-			/* TODO: Add DMA_S later? */
-			dsi_encs[i]->possible_crtcs = 1 << DMA_P;
-			priv->encoders[priv->num_encoders++] = dsi_encs[i];
+		encoder = mdp4_dsi_encoder_init(dev);
+		if (IS_ERR(encoder)) {
+			ret = PTR_ERR(encoder);
+			dev_err(dev->dev,
+				"failed to construct DSI encoder: %d\n", ret);
+			return ret;
 		}
 
-		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
+		/* TODO: Add DMA_S later? */
+		encoder->possible_crtcs = 1 << DMA_P;
+		priv->encoders[priv->num_encoders++] = encoder;
+
+		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
 		if (ret) {
 			dev_err(dev->dev, "failed to initialize DSI: %d\n",
 				ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 27d5371acee0..e6dfc518d4db 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,19 +8,11 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36965 bytes, from 2016-11-26 23:01:08)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  27887 bytes, from 2015-10-22 16:34:52)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
-
-Copyright (C) 2013-2016 by the following authors:
+- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml   (  37411 bytes, from 2017-01-11 05:19:19)
+- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-05-09 06:32:54)
+- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2016-01-07 08:45:55)
+
+Copyright (C) 2013-2017 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -65,16 +57,19 @@ enum mdp5_intfnum {
 };
 
 enum mdp5_pipe {
-	SSPP_VIG0 = 0,
-	SSPP_VIG1 = 1,
-	SSPP_VIG2 = 2,
-	SSPP_RGB0 = 3,
-	SSPP_RGB1 = 4,
-	SSPP_RGB2 = 5,
-	SSPP_DMA0 = 6,
-	SSPP_DMA1 = 7,
-	SSPP_VIG3 = 8,
-	SSPP_RGB3 = 9,
+	SSPP_NONE = 0,
+	SSPP_VIG0 = 1,
+	SSPP_VIG1 = 2,
+	SSPP_VIG2 = 3,
+	SSPP_RGB0 = 4,
+	SSPP_RGB1 = 5,
+	SSPP_RGB2 = 6,
+	SSPP_DMA0 = 7,
+	SSPP_DMA1 = 8,
+	SSPP_VIG3 = 9,
+	SSPP_RGB3 = 10,
+	SSPP_CURSOR0 = 11,
+	SSPP_CURSOR1 = 12,
 };
 
 enum mdp5_ctl_mode {
@@ -532,6 +527,7 @@ static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id va
 static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
 {
 	switch (idx) {
+		case SSPP_NONE: return (INVALID_IDX(idx));
 		case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]);
 		case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]);
 		case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]);
@@ -542,6 +538,8 @@ static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
 		case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]);
 		case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]);
 		case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]);
+		case SSPP_CURSOR0: return (mdp5_cfg->pipe_cursor.base[0]);
+		case SSPP_CURSOR1: return (mdp5_cfg->pipe_cursor.base[1]);
 		default: return INVALID_IDX(idx);
 	}
 }
@@ -1073,6 +1071,10 @@ static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000
 #define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA			0x00000004
 #define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA			0x00000008
 #define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA			0x00000010
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA			0x00000020
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA			0x00000040
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA			0x00000080
+#define MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT		0x80000000
 
 static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); }
 #define MDP5_LM_OUT_SIZE_HEIGHT__MASK				0xffff0000
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 618b2ffed9b4..34ab553f6897 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -421,6 +421,16 @@ const struct mdp5_cfg_hw msm8x96_config = {
 			MDP_PIPE_CAP_SW_PIX_EXT	|
 			0,
 	},
+	.pipe_cursor = {
+		.count = 2,
+		.base = { 0x34000, 0x36000 },
+		.caps = MDP_PIPE_CAP_HFLIP	|
+			MDP_PIPE_CAP_VFLIP	|
+			MDP_PIPE_CAP_SW_PIX_EXT	|
+			MDP_PIPE_CAP_CURSOR	|
+			0,
+	},
+
 	.lm = {
 		.count = 6,
 		.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index 050e1618c836..b1c7daaede86 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -32,7 +32,7 @@ extern const struct mdp5_cfg_hw *mdp5_cfg;
 typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
 
 #define MDP5_SUB_BLOCK_DEFINITION \
-	int count; \
+	unsigned int count; \
 	uint32_t base[MAX_BASES]
 
 struct mdp5_sub_block {
@@ -85,6 +85,7 @@ struct mdp5_cfg_hw {
 	struct mdp5_pipe_block pipe_vig;
 	struct mdp5_pipe_block pipe_rgb;
 	struct mdp5_pipe_block pipe_dma;
+	struct mdp5_pipe_block pipe_cursor;
 	struct mdp5_lm_block  lm;
 	struct mdp5_sub_block dspp;
 	struct mdp5_sub_block ad;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index c627ab6d0061..df1c8adec3f3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -16,16 +16,6 @@
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 
-struct mdp5_cmd_encoder {
-	struct drm_encoder base;
-	struct mdp5_interface intf;
-	bool enabled;
-	uint32_t bsc;
-
-	struct mdp5_ctl *ctl;
-};
-#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
-
 static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
 {
 	struct msm_drm_private *priv = encoder->dev->dev_private;
@@ -36,47 +26,8 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
 #include <mach/board.h>
 #include <linux/msm-bus.h>
 #include <linux/msm-bus-board.h>
-#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val)		\
-	{						\
-		.src = MSM_BUS_MASTER_MDP_PORT0,	\
-		.dst = MSM_BUS_SLAVE_EBI_CH0,		\
-		.ab = (ab_val),				\
-		.ib = (ib_val),				\
-	}
-
-static struct msm_bus_vectors mdp_bus_vectors[] = {
-	MDP_BUS_VECTOR_ENTRY(0, 0),
-	MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
-};
-static struct msm_bus_paths mdp_bus_usecases[] = { {
-		.num_paths = 1,
-		.vectors = &mdp_bus_vectors[0],
-}, {
-		.num_paths = 1,
-		.vectors = &mdp_bus_vectors[1],
-} };
-static struct msm_bus_scale_pdata mdp_bus_scale_table = {
-	.usecase = mdp_bus_usecases,
-	.num_usecases = ARRAY_SIZE(mdp_bus_usecases),
-	.name = "mdss_mdp",
-};
-
-static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc)
-{
-	mdp5_cmd_enc->bsc = msm_bus_scale_register_client(
-			&mdp_bus_scale_table);
-	DBG("bus scale client: %08x", mdp5_cmd_enc->bsc);
-}
-
-static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc)
-{
-	if (mdp5_cmd_enc->bsc) {
-		msm_bus_scale_unregister_client(mdp5_cmd_enc->bsc);
-		mdp5_cmd_enc->bsc = 0;
-	}
-}
 
-static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx)
+static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx)
 {
 	if (mdp5_cmd_enc->bsc) {
 		DBG("set bus scaling: %d", idx);
@@ -89,14 +40,12 @@ static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx)
 	}
 }
 #else
-static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) {}
-static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) {}
-static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) {}
+static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {}
 #endif
 
 #define VSYNC_CLK_RATE 19200000
 static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
-					struct drm_display_mode *mode)
+				    struct drm_display_mode *mode)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
 	struct device *dev = encoder->dev->dev;
@@ -176,23 +125,11 @@ static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
 	clk_disable_unprepare(mdp5_kms->vsync_clk);
 }
 
-static void mdp5_cmd_encoder_destroy(struct drm_encoder *encoder)
-{
-	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
-	bs_fini(mdp5_cmd_enc);
-	drm_encoder_cleanup(encoder);
-	kfree(mdp5_cmd_enc);
-}
-
-static const struct drm_encoder_funcs mdp5_cmd_encoder_funcs = {
-	.destroy = mdp5_cmd_encoder_destroy,
-};
-
-static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
-		struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
+void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
 {
-	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+	struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
 
 	mode = adjusted_mode;
 
@@ -209,9 +146,9 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
 				mdp5_cmd_enc->ctl);
 }
 
-static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
+void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
 {
-	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+	struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
 	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
 	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
 
@@ -228,9 +165,9 @@ static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
 	mdp5_cmd_enc->enabled = false;
 }
 
-static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
+void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
 {
-	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+	struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
 	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
 	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
 
@@ -248,16 +185,10 @@ static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
 	mdp5_cmd_enc->enabled = true;
 }
 
-static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = {
-	.mode_set = mdp5_cmd_encoder_mode_set,
-	.disable = mdp5_cmd_encoder_disable,
-	.enable = mdp5_cmd_encoder_enable,
-};
-
 int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
-					struct drm_encoder *slave_encoder)
+				       struct drm_encoder *slave_encoder)
 {
-	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+	struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms;
 	int intf_num;
 	u32 data = 0;
@@ -292,43 +223,3 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
 
 	return 0;
 }
-
-/* initialize command mode encoder */
-struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
-			struct mdp5_interface *intf, struct mdp5_ctl *ctl)
-{
-	struct drm_encoder *encoder = NULL;
-	struct mdp5_cmd_encoder *mdp5_cmd_enc;
-	int ret;
-
-	if (WARN_ON((intf->type != INTF_DSI) &&
-		(intf->mode != MDP5_INTF_DSI_MODE_COMMAND))) {
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	mdp5_cmd_enc = kzalloc(sizeof(*mdp5_cmd_enc), GFP_KERNEL);
-	if (!mdp5_cmd_enc) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
-	encoder = &mdp5_cmd_enc->base;
-	mdp5_cmd_enc->ctl = ctl;
-
-	drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
-			DRM_MODE_ENCODER_DSI, NULL);
-
-	drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs);
-
-	bs_init(mdp5_cmd_enc);
-
-	return encoder;
-
-fail:
-	if (encoder)
-		mdp5_cmd_encoder_destroy(encoder);
-
-	return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 1ce8a01a5a28..d0c8b38b96ce 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -177,6 +177,21 @@ static void mdp5_crtc_destroy(struct drm_crtc *crtc)
 	kfree(mdp5_crtc);
 }
 
+static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
+{
+	switch (stage) {
+	case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
+	case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
+	case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
+	case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
+	case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
+	case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
+	case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
+	default:
+		return 0;
+	}
+}
+
 /*
  * blend_setup() - blend all the planes of a CRTC
  *
@@ -195,8 +210,10 @@ static void blend_setup(struct drm_crtc *crtc)
 	uint32_t lm = mdp5_crtc->lm;
 	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
 	unsigned long flags;
-	uint8_t stage[STAGE_MAX + 1];
+	enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE };
 	int i, plane_cnt = 0;
+	bool bg_alpha_enabled = false;
+	u32 mixer_op_mode = 0;
 #define blender(stage)	((stage) - STAGE0)
 
 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
@@ -218,6 +235,11 @@ static void blend_setup(struct drm_crtc *crtc)
 	if (!pstates[STAGE_BASE]) {
 		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
 		DBG("Border Color is enabled");
+	} else if (plane_cnt) {
+		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));
+
+		if (format->alpha_enable)
+			bg_alpha_enabled = true;
 	}
 
 	/* The reset for blending */
@@ -232,6 +254,12 @@ static void blend_setup(struct drm_crtc *crtc)
 			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
 		fg_alpha = pstates[i]->alpha;
 		bg_alpha = 0xFF - pstates[i]->alpha;
+
+		if (!format->alpha_enable && bg_alpha_enabled)
+			mixer_op_mode = 0;
+		else
+			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);
+
 		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
 
 		if (format->alpha_enable && pstates[i]->premultiplied) {
@@ -268,6 +296,8 @@ static void blend_setup(struct drm_crtc *crtc)
 				blender(i)), bg_alpha);
 	}
 
+	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode);
+
 	mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
 
 out:
@@ -370,6 +400,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	struct plane_state pstates[STAGE_MAX + 1];
 	const struct mdp5_cfg_hw *hw_cfg;
 	const struct drm_plane_state *pstate;
+	bool cursor_plane = false;
 	int cnt = 0, base = 0, i;
 
 	DBG("%s: check", crtc->name);
@@ -379,6 +410,9 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 		pstates[cnt].state = to_mdp5_plane_state(pstate);
 
 		cnt++;
+
+		if (plane->type == DRM_PLANE_TYPE_CURSOR)
+			cursor_plane = true;
 	}
 
 	/* assign a stage based on sorted zpos property */
@@ -390,6 +424,10 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
 		base++;
 
+	/* trigger a warning if cursor isn't the highest zorder */
+	WARN_ON(cursor_plane &&
+		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
+
 	/* verify that there are not too many planes attached to crtc
 	 * and that we don't have conflicting mixer stages:
 	 */
@@ -401,7 +439,10 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	}
 
 	for (i = 0; i < cnt; i++) {
-		pstates[i].state->stage = STAGE_BASE + i + base;
+		if (cursor_plane && (i == (cnt - 1)))
+			pstates[i].state->stage = hw_cfg->lm.nb_stages;
+		else
+			pstates[i].state->stage = STAGE_BASE + i + base;
 		DBG("%s: assign pipe %s on stage=%d", crtc->name,
 				pstates[i].plane->name,
 				pstates[i].state->stage);
@@ -612,6 +653,16 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
 	.cursor_move = mdp5_crtc_cursor_move,
 };
 
+static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
+	.set_config = drm_atomic_helper_set_config,
+	.destroy = mdp5_crtc_destroy,
+	.page_flip = drm_atomic_helper_page_flip,
+	.set_property = drm_atomic_helper_crtc_set_property,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
 	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
 	.disable = mdp5_crtc_disable,
@@ -727,6 +778,13 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
 	mdp5_ctl_set_pipeline(ctl, intf, lm);
 }
 
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+	return mdp5_crtc->ctl;
+}
+
 int mdp5_crtc_get_lm(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -745,7 +803,8 @@ void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
 
 /* initialize crtc */
 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
-		struct drm_plane *plane, int id)
+				struct drm_plane *plane,
+				struct drm_plane *cursor_plane, int id)
 {
 	struct drm_crtc *crtc = NULL;
 	struct mdp5_crtc *mdp5_crtc;
@@ -766,8 +825,12 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
 	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
 
-	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs,
-				  NULL);
+	if (cursor_plane)
+		drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
+					  &mdp5_crtc_no_lm_cursor_funcs, NULL);
+	else
+		drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+					  &mdp5_crtc_funcs, NULL);
 
 	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
 			"unref cursor", unref_cursor_worker);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index d021edc3b307..8b93f7e13200 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -326,6 +326,8 @@ static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
 	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
 	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
 	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
+	case SSPP_CURSOR0:
+	case SSPP_CURSOR1:
 	default:	return 0;
 	}
 }
@@ -333,7 +335,7 @@ static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
 static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
 		enum mdp_mixer_stage_id stage)
 {
-	if (stage < STAGE6)
+	if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
 		return 0;
 
 	switch (pipe) {
@@ -347,12 +349,14 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
 	case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
 	case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
 	case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
+	case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
+	case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
 	default:	return 0;
 	}
 }
 
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
-	u32 ctl_blend_op_flags)
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
+		   u32 ctl_blend_op_flags)
 {
 	unsigned long flags;
 	u32 blend_cfg = 0, blend_ext_cfg = 0;
@@ -365,7 +369,7 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
 		start_stage = STAGE_BASE;
 	}
 
-	for (i = start_stage; i < start_stage + stage_cnt; i++) {
+	for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
 		blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
 		blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
 	}
@@ -422,6 +426,8 @@ u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
 	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
 	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
 	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
+	case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
+	case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
 	default:        return 0;
 	}
 }
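
Note on the blend changes above: cursor SSPPs are staged through the LAYER_EXT registers regardless of stage number, which is why mdp_ctl_blend_ext_mask() no longer bails out early for them, and why mdp5_ctl_blend() now walks every stage up to STAGE_MAX rather than stage_cnt contiguous entries. A hedged sketch of the caller's side (the stage[] array is assumed to be sized STAGE_MAX + 1 with unused slots set to SSPP_NONE, which both mask helpers map to 0; the pipe choices are illustrative):

    enum mdp5_pipe stage[STAGE_MAX + 1];
    int i;

    for (i = 0; i <= STAGE_MAX; i++)
        stage[i] = SSPP_NONE;          /* empty slots contribute no mask bits */

    stage[STAGE_BASE] = SSPP_RGB0;     /* base layer */
    stage[STAGE6] = SSPP_CURSOR0;      /* cursor staged non-contiguously */

    mdp5_ctl_blend(ctl, stage, 2, 0);
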
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index 96148c6f863c..fda00d33e4db 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -56,8 +56,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
  * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
  */
 #define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT	BIT(0)
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
-	u32 ctl_blend_op_flags);
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
+		   u32 ctl_blend_op_flags);
 
 /**
  * mdp_ctl_flush_mask...() - Register FLUSH masks
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index fe0c22230883..80fa482ae8ed 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -21,17 +21,6 @@
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 
-struct mdp5_encoder {
-	struct drm_encoder base;
-	struct mdp5_interface intf;
-	spinlock_t intf_lock;	/* protect REG_MDP5_INTF_* registers */
-	bool enabled;
-	uint32_t bsc;
-
-	struct mdp5_ctl *ctl;
-};
-#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
-
 static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
 {
 	struct msm_drm_private *priv = encoder->dev->dev_private;
@@ -112,9 +101,9 @@ static const struct drm_encoder_funcs mdp5_encoder_funcs = {
 	.destroy = mdp5_encoder_destroy,
 };
 
-static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
-		struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
+static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
+				      struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
@@ -221,7 +210,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
 				mdp5_encoder->ctl);
 }
 
-static void mdp5_encoder_disable(struct drm_encoder *encoder)
+static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
@@ -256,7 +245,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder)
 	mdp5_encoder->enabled = false;
 }
 
-static void mdp5_encoder_enable(struct drm_encoder *encoder)
+static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
@@ -279,6 +268,41 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
 	mdp5_encoder->enabled = true;
 }
 
+static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_interface *intf = &mdp5_encoder->intf;
+
+	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+		mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode);
+	else
+		mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode);
+}
+
+static void mdp5_encoder_disable(struct drm_encoder *encoder)
+{
+	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_interface *intf = &mdp5_encoder->intf;
+
+	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+		mdp5_cmd_encoder_disable(encoder);
+	else
+		mdp5_vid_encoder_disable(encoder);
+}
+
+static void mdp5_encoder_enable(struct drm_encoder *encoder)
+{
+	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_interface *intf = &mdp5_encoder->intf;
+
+	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+		mdp5_cmd_encoder_enable(encoder);
+	else
+		mdp5_vid_encoder_enable(encoder);
+}
+
 static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
 	.mode_set = mdp5_encoder_mode_set,
 	.disable = mdp5_encoder_disable,
@@ -303,8 +327,8 @@ u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
 	return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
 }
 
-int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
-					struct drm_encoder *slave_encoder)
+int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
+				       struct drm_encoder *slave_encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
@@ -342,6 +366,23 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 	return 0;
 }
 
+void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
+{
+	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_interface *intf = &mdp5_encoder->intf;
+
+	/* TODO: Expand this to set writeback modes too */
+	if (cmd_mode) {
+		WARN_ON(intf->type != INTF_DSI);
+		intf->mode = MDP5_INTF_DSI_MODE_COMMAND;
+	} else {
+		if (intf->type == INTF_DSI)
+			intf->mode = MDP5_INTF_DSI_MODE_VIDEO;
+		else
+			intf->mode = MDP5_INTF_MODE_NONE;
+	}
+}
+
 /* initialize encoder */
 struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
 			struct mdp5_interface *intf, struct mdp5_ctl *ctl)
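
With the encoder split above, a single drm_encoder now serves both DSI command and video modes, dispatching on intf->mode at runtime; mdp5_encoder_set_intf_mode() is what flips it. A hedged sketch of the expected caller (the DSI host, once it has probed the panel; the helper name is hypothetical and the real call site is not in this hunk):

    /* hypothetical DSI-side helper built on the new kms hook */
    static void dsi_set_encoder_mode(struct msm_kms *kms,
                                     struct drm_encoder *encoder,
                                     bool panel_is_cmd_mode)
    {
        if (kms->funcs->set_encoder_mode)
            kms->funcs->set_encoder_mode(kms, encoder, panel_is_cmd_mode);
    }
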
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index c396d459a9d0..3eb0749223d9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -148,7 +148,15 @@ static int mdp5_set_split_display(struct msm_kms *kms,
 		return mdp5_cmd_encoder_set_split_display(encoder,
 							slave_encoder);
 	else
-		return mdp5_encoder_set_split_display(encoder, slave_encoder);
+		return mdp5_vid_encoder_set_split_display(encoder,
+							  slave_encoder);
+}
+
+static void mdp5_set_encoder_mode(struct msm_kms *kms,
+				  struct drm_encoder *encoder,
+				  bool cmd_mode)
+{
+	mdp5_encoder_set_intf_mode(encoder, cmd_mode);
 }
 
 static void mdp5_kms_destroy(struct msm_kms *kms)
@@ -230,6 +238,7 @@ static const struct mdp_kms_funcs kms_funcs = {
 		.get_format      = mdp_get_format,
 		.round_pixclk    = mdp5_round_pixclk,
 		.set_split_display = mdp5_set_split_display,
+		.set_encoder_mode = mdp5_set_encoder_mode,
 		.destroy         = mdp5_kms_destroy,
 #ifdef CONFIG_DEBUG_FS
 		.debugfs_init    = mdp5_kms_debugfs_init,
@@ -267,7 +276,7 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
 
 static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
 		enum mdp5_intf_type intf_type, int intf_num,
-		enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl)
+		struct mdp5_ctl *ctl)
 {
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -275,21 +284,15 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
 	struct mdp5_interface intf = {
 			.num	= intf_num,
 			.type	= intf_type,
-			.mode	= intf_mode,
+			.mode	= MDP5_INTF_MODE_NONE,
 	};
 
-	if ((intf_type == INTF_DSI) &&
-		(intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
-		encoder = mdp5_cmd_encoder_init(dev, &intf, ctl);
-	else
-		encoder = mdp5_encoder_init(dev, &intf, ctl);
-
+	encoder = mdp5_encoder_init(dev, &intf, ctl);
 	if (IS_ERR(encoder)) {
 		dev_err(dev->dev, "failed to construct encoder\n");
 		return encoder;
 	}
 
-	encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
 	priv->encoders[priv->num_encoders++] = encoder;
 
 	return encoder;
@@ -338,8 +341,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 			break;
 		}
 
-		encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num,
-					MDP5_INTF_MODE_NONE, ctl);
+		encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
 			break;
@@ -357,8 +359,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 			break;
 		}
 
-		encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num,
-					MDP5_INTF_MODE_NONE, ctl);
+		encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
 			break;
@@ -369,9 +370,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 	case INTF_DSI:
 	{
 		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num);
-		struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
-		enum mdp5_intf_mode mode;
-		int i;
 
 		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
 			dev_err(dev->dev, "failed to find dsi from intf %d\n",
@@ -389,19 +387,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 			break;
 		}
 
-		for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
-			mode = (i == MSM_DSI_CMD_ENCODER_ID) ?
-				MDP5_INTF_DSI_MODE_COMMAND :
-				MDP5_INTF_DSI_MODE_VIDEO;
-			dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
-							intf_num, mode, ctl);
-			if (IS_ERR(dsi_encs[i])) {
-				ret = PTR_ERR(dsi_encs[i]);
-				break;
-			}
+		encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl);
+		if (IS_ERR(encoder)) {
+			ret = PTR_ERR(encoder);
+			break;
 		}
 
-		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
+		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
 		break;
 	}
 	default:
@@ -418,20 +410,48 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	const struct mdp5_cfg_hw *hw_cfg;
-	int i, ret;
+	unsigned int num_crtcs;
+	int i, ret, pi = 0, ci = 0;
+	struct drm_plane *primary[MAX_BASES] = { NULL };
+	struct drm_plane *cursor[MAX_BASES] = { NULL };
 
 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 
-	/* Construct planes equaling the number of hw pipes, and CRTCs
-	 * for the N layer-mixers (LM).  The first N planes become primary
+	/*
+	 * Construct encoders and modeset initialize connector devices
+	 * for each external display interface.
+	 */
+	for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
+		ret = modeset_init_intf(mdp5_kms, i);
+		if (ret)
+			goto fail;
+	}
+
+	/*
+	 * We should ideally have fewer encoders (set up by parsing the MDP5
+	 * interfaces) than the number of layer mixers present in HW, but
+	 * let's be safe here anyway.
+	 */
+	num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count);
+
+	/*
+	 * Construct planes equaling the number of hw pipes, and CRTCs for the
+	 * N encoders set up by the driver. The first N planes become primary
 	 * planes for the CRTCs, with the remainder as overlay planes:
 	 */
 	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
-		bool primary = i < mdp5_cfg->lm.count;
+		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
 		struct drm_plane *plane;
-		struct drm_crtc *crtc;
+		enum drm_plane_type type;
 
-		plane = mdp5_plane_init(dev, primary);
+		if (i < num_crtcs)
+			type = DRM_PLANE_TYPE_PRIMARY;
+		else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
+			type = DRM_PLANE_TYPE_CURSOR;
+		else
+			type = DRM_PLANE_TYPE_OVERLAY;
+
+		plane = mdp5_plane_init(dev, type);
 		if (IS_ERR(plane)) {
 			ret = PTR_ERR(plane);
 			dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
@@ -439,10 +459,16 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 		}
 		priv->planes[priv->num_planes++] = plane;
 
-		if (!primary)
-			continue;
+		if (type == DRM_PLANE_TYPE_PRIMARY)
+			primary[pi++] = plane;
+		if (type == DRM_PLANE_TYPE_CURSOR)
+			cursor[ci++] = plane;
+	}
+
+	for (i = 0; i < num_crtcs; i++) {
+		struct drm_crtc *crtc;
 
-		crtc  = mdp5_crtc_init(dev, plane, i);
+		crtc  = mdp5_crtc_init(dev, primary[i], cursor[i], i);
 		if (IS_ERR(crtc)) {
 			ret = PTR_ERR(crtc);
 			dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
@@ -451,13 +477,14 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 		priv->crtcs[priv->num_crtcs++] = crtc;
 	}
 
-	/* Construct encoders and modeset initialize connector devices
-	 * for each external display interface.
+	/*
+	 * Now that we know the number of crtcs we've created, set the possible
+	 * crtcs for the encoders
 	 */
-	for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
-		ret = modeset_init_intf(mdp5_kms, i);
-		if (ret)
-			goto fail;
+	for (i = 0; i < priv->num_encoders; i++) {
+		struct drm_encoder *encoder = priv->encoders[i];
+
+		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
 	}
 
 	return 0;
@@ -773,6 +800,9 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms)
 	static const enum mdp5_pipe dma_planes[] = {
 			SSPP_DMA0, SSPP_DMA1,
 	};
+	static const enum mdp5_pipe cursor_planes[] = {
+			SSPP_CURSOR0, SSPP_CURSOR1,
+	};
 	const struct mdp5_cfg_hw *hw_cfg;
 	int ret;
 
@@ -796,6 +826,13 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms)
 	if (ret)
 		return ret;
 
+	/* Construct cursor pipes: */
+	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
+			cursor_planes, hw_cfg->pipe_cursor.base,
+			hw_cfg->pipe_cursor.caps);
+	if (ret)
+		return ret;
+
 	return 0;
 }
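
The new SSPP_CURSOR0/1 hw pipes constructed above only make sense for cursor planes, and mdp5_pipe_assign() (changed later in this diff) filters on that. The capability test it relies on, distilled:

    /* a pipe qualifies only if it has every requested caps bit set;
     * cursor planes request MDP_PIPE_CAP_CURSOR, so only the cursor
     * SSPPs constructed here can serve them */
    static bool pipe_can_serve(uint32_t pipe_caps, uint32_t requested)
    {
        return (requested & ~pipe_caps) == 0;
    }
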
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index cdfc63d90c7b..9de471191eba 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -126,6 +126,17 @@ struct mdp5_interface {
 	enum mdp5_intf_mode mode;
 };
 
+struct mdp5_encoder {
+	struct drm_encoder base;
+	struct mdp5_interface intf;
+	spinlock_t intf_lock;	/* protect REG_MDP5_INTF_* registers */
+	bool enabled;
+	uint32_t bsc;
+
+	struct mdp5_ctl *ctl;
+};
+#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
+
 static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
 {
 	msm_writel(data, mdp5_kms->mmio + reg);
@@ -156,6 +167,7 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
 		NAME(RGB0), NAME(RGB1), NAME(RGB2),
 		NAME(DMA0), NAME(DMA1),
 		NAME(VIG3), NAME(RGB3),
+		NAME(CURSOR0), NAME(CURSOR1),
 #undef NAME
 	};
 	return names[pipe];
@@ -231,8 +243,10 @@ void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
 
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
-struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+				  enum drm_plane_type type);
 
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc);
@@ -240,25 +254,36 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
 		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
-		struct drm_plane *plane, int id);
+				struct drm_plane *plane,
+				struct drm_plane *cursor_plane, int id);
 
 struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
 		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
-int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
-					struct drm_encoder *slave_encoder);
+int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
+				       struct drm_encoder *slave_encoder);
+void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode);
 int mdp5_encoder_get_linecount(struct drm_encoder *encoder);
 u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder);
 
 #ifdef CONFIG_DRM_MSM_DSI
-struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
-		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
+void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode);
+void mdp5_cmd_encoder_disable(struct drm_encoder *encoder);
+void mdp5_cmd_encoder_enable(struct drm_encoder *encoder);
 int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
-					struct drm_encoder *slave_encoder);
+				       struct drm_encoder *slave_encoder);
 #else
-static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
-		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
+static inline void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+					     struct drm_display_mode *mode,
+					     struct drm_display_mode *adjusted_mode)
+{
+}
+static inline void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
+{
+}
+static inline void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
 {
-	return ERR_PTR(-EINVAL);
 }
 static inline int mdp5_cmd_encoder_set_split_display(
 	struct drm_encoder *encoder, struct drm_encoder *slave_encoder)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
index 1ae9dc8d260d..35c4dabb0c0c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
@@ -53,6 +53,14 @@ struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
 		if (caps & ~cur->caps)
 			continue;
 
+		/*
+		 * don't assign a cursor pipe to a plane that isn't going to
+		 * be used as a cursor
+		 */
+		if (cur->caps & MDP_PIPE_CAP_CURSOR &&
+				plane->type != DRM_PLANE_TYPE_CURSOR)
+			continue;
+
 		/* possible candidate, take the one with the
 		 * fewest unneeded caps bits set:
 		 */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index b9fb111d3428..0ffb8affef35 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -29,6 +29,11 @@ struct mdp5_plane {
 
 static int mdp5_plane_mode_set(struct drm_plane *plane,
 		struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		struct drm_rect *src, struct drm_rect *dest);
+
+static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
+		struct drm_crtc *crtc,
+		struct drm_framebuffer *fb,
 		int crtc_x, int crtc_y,
 		unsigned int crtc_w, unsigned int crtc_h,
 		uint32_t src_x, uint32_t src_y,
@@ -45,7 +50,7 @@ static struct mdp5_kms *get_kms(struct drm_plane *plane)
 
 static bool plane_enabled(struct drm_plane_state *state)
 {
-	return state->fb && state->crtc;
+	return state->visible;
 }
 
 static void mdp5_plane_destroy(struct drm_plane *plane)
@@ -246,6 +251,19 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
 		.atomic_print_state = mdp5_plane_atomic_print_state,
 };
 
+static const struct drm_plane_funcs mdp5_cursor_plane_funcs = {
+		.update_plane = mdp5_update_cursor_plane_legacy,
+		.disable_plane = drm_atomic_helper_disable_plane,
+		.destroy = mdp5_plane_destroy,
+		.set_property = drm_atomic_helper_plane_set_property,
+		.atomic_set_property = mdp5_plane_atomic_set_property,
+		.atomic_get_property = mdp5_plane_atomic_get_property,
+		.reset = mdp5_plane_reset,
+		.atomic_duplicate_state = mdp5_plane_duplicate_state,
+		.atomic_destroy_state = mdp5_plane_destroy_state,
+		.atomic_print_state = mdp5_plane_atomic_print_state,
+};
+
 static int mdp5_plane_prepare_fb(struct drm_plane *plane,
 				 struct drm_plane_state *new_state)
 {
@@ -272,15 +290,20 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
 	msm_framebuffer_cleanup(fb, mdp5_kms->id);
 }
 
-static int mdp5_plane_atomic_check(struct drm_plane *plane,
-		struct drm_plane_state *state)
+#define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
+static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
+					      struct drm_plane_state *state)
 {
 	struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
+	struct drm_plane *plane = state->plane;
 	struct drm_plane_state *old_state = plane->state;
 	struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
 	bool new_hwpipe = false;
 	uint32_t max_width, max_height;
 	uint32_t caps = 0;
+	struct drm_rect clip;
+	int min_scale, max_scale;
+	int ret;
 
 	DBG("%s: check (%d -> %d)", plane->name,
 			plane_enabled(old_state), plane_enabled(state));
@@ -296,6 +319,18 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 		return -ERANGE;
 	}
 
+	clip.x1 = 0;
+	clip.y1 = 0;
+	clip.x2 = crtc_state->adjusted_mode.hdisplay;
+	clip.y2 = crtc_state->adjusted_mode.vdisplay;
+	min_scale = FRAC_16_16(1, 8);
+	max_scale = FRAC_16_16(8, 1);
+
+	ret = drm_plane_helper_check_state(state, &clip, min_scale,
+					   max_scale, true, true);
+	if (ret)
+		return ret;
+
 	if (plane_enabled(state)) {
 		unsigned int rotation;
 		const struct mdp_format *format;
@@ -321,6 +356,9 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 		if (rotation & DRM_REFLECT_Y)
 			caps |= MDP_PIPE_CAP_VFLIP;
 
+		if (plane->type == DRM_PLANE_TYPE_CURSOR)
+			caps |= MDP_PIPE_CAP_CURSOR;
+
 		/* (re)allocate hw pipe if we don't have one or caps-mismatch: */
 		if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
 			new_hwpipe = true;
@@ -356,6 +394,23 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 	return 0;
 }
 
+static int mdp5_plane_atomic_check(struct drm_plane *plane,
+				   struct drm_plane_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+
+	crtc = state->crtc ? state->crtc : plane->state->crtc;
+	if (!crtc)
+		return 0;
+
+	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+	if (WARN_ON(!crtc_state))
+		return -EINVAL;
+
+	return mdp5_plane_atomic_check_with_state(crtc_state, state);
+}
+
 static void mdp5_plane_atomic_update(struct drm_plane *plane,
 				     struct drm_plane_state *old_state)
 {
@@ -368,10 +423,7 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
 
 		ret = mdp5_plane_mode_set(plane,
 				state->crtc, state->fb,
-				state->crtc_x, state->crtc_y,
-				state->crtc_w, state->crtc_h,
-				state->src_x,  state->src_y,
-				state->src_w, state->src_h);
+				&state->src, &state->dst);
 		/* atomic_check should have ensured that this doesn't fail */
 		WARN_ON(ret < 0);
 	}
@@ -664,10 +716,7 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
 
 static int mdp5_plane_mode_set(struct drm_plane *plane,
 		struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		int crtc_x, int crtc_y,
-		unsigned int crtc_w, unsigned int crtc_h,
-		uint32_t src_x, uint32_t src_y,
-		uint32_t src_w, uint32_t src_h)
+		struct drm_rect *src, struct drm_rect *dest)
 {
 	struct drm_plane_state *pstate = plane->state;
 	struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
@@ -683,6 +732,10 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 	uint32_t pix_format;
 	unsigned int rotation;
 	bool vflip, hflip;
+	int crtc_x, crtc_y;
+	unsigned int crtc_w, crtc_h;
+	uint32_t src_x, src_y;
+	uint32_t src_w, src_h;
 	unsigned long flags;
 	int ret;
 
@@ -695,6 +748,16 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 	format = to_mdp_format(msm_framebuffer_format(fb));
 	pix_format = format->base.pixel_format;
 
+	src_x = src->x1;
+	src_y = src->y1;
+	src_w = drm_rect_width(src);
+	src_h = drm_rect_height(src);
+
+	crtc_x = dest->x1;
+	crtc_y = dest->y1;
+	crtc_w = drm_rect_width(dest);
+	crtc_h = drm_rect_height(dest);
+
 	/* src values are in Q16 fixed point, convert to integer: */
 	src_x = src_x >> 16;
 	src_y = src_y >> 16;
@@ -818,12 +881,88 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 	return ret;
 }
 
+static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
+			struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			int crtc_x, int crtc_y,
+			unsigned int crtc_w, unsigned int crtc_h,
+			uint32_t src_x, uint32_t src_y,
+			uint32_t src_w, uint32_t src_h)
+{
+	struct drm_plane_state *plane_state, *new_plane_state;
+	struct mdp5_plane_state *mdp5_pstate;
+	struct drm_crtc_state *crtc_state = crtc->state;
+	int ret;
+
+	if (!crtc_state->active || drm_atomic_crtc_needs_modeset(crtc_state))
+		goto slow;
+
+	plane_state = plane->state;
+	mdp5_pstate = to_mdp5_plane_state(plane_state);
+
+	/* don't use fast path if we don't have a hwpipe allocated yet */
+	if (!mdp5_pstate->hwpipe)
+		goto slow;
+
+	/* only allow changing position (crtc x/y or src x/y) in the fast path */
+	if (plane_state->crtc != crtc ||
+	    plane_state->src_w != src_w ||
+	    plane_state->src_h != src_h ||
+	    plane_state->crtc_w != crtc_w ||
+	    plane_state->crtc_h != crtc_h ||
+	    !plane_state->fb ||
+	    plane_state->fb != fb)
+		goto slow;
+
+	new_plane_state = mdp5_plane_duplicate_state(plane);
+	if (!new_plane_state)
+		return -ENOMEM;
+
+	new_plane_state->src_x = src_x;
+	new_plane_state->src_y = src_y;
+	new_plane_state->src_w = src_w;
+	new_plane_state->src_h = src_h;
+	new_plane_state->crtc_x = crtc_x;
+	new_plane_state->crtc_y = crtc_y;
+	new_plane_state->crtc_w = crtc_w;
+	new_plane_state->crtc_h = crtc_h;
+
+	ret = mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
+	if (ret)
+		goto slow_free;
+
+	if (new_plane_state->visible) {
+		struct mdp5_ctl *ctl;
+
+		ret = mdp5_plane_mode_set(plane, crtc, fb,
+					  &new_plane_state->src,
+					  &new_plane_state->dst);
+		WARN_ON(ret < 0);
+
+		ctl = mdp5_crtc_get_ctl(crtc);
+
+		mdp5_ctl_commit(ctl, mdp5_plane_get_flush(plane));
+	}
+
+	*to_mdp5_plane_state(plane_state) =
+		*to_mdp5_plane_state(new_plane_state);
+
+	mdp5_plane_destroy_state(plane, new_plane_state);
+
+	return 0;
+slow_free:
+	mdp5_plane_destroy_state(plane, new_plane_state);
+slow:
+	return drm_atomic_helper_update_plane(plane, crtc, fb,
+					      crtc_x, crtc_y, crtc_w, crtc_h,
+					      src_x, src_y, src_w, src_h);
+}
+
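
The fast path above sidesteps a full atomic commit for position-only cursor moves. Its core trick, distilled from the code just shown: validate a duplicated plane state against the current CRTC state, then copy it over the live state.

    new_plane_state = mdp5_plane_duplicate_state(plane);
    new_plane_state->crtc_x = crtc_x;    /* position-only deltas allowed */
    new_plane_state->crtc_y = crtc_y;

    ret = mdp5_plane_atomic_check_with_state(crtc->state, new_plane_state);
    if (!ret)
        *to_mdp5_plane_state(plane->state) =
            *to_mdp5_plane_state(new_plane_state);
    mdp5_plane_destroy_state(plane, new_plane_state);

Anything else -- a new fb, a new size, or a different CRTC -- falls through to drm_atomic_helper_update_plane().
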
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
 {
 	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
 
 	if (WARN_ON(!pstate->hwpipe))
-		return 0;
+		return SSPP_NONE;
 
 	return pstate->hwpipe->pipe;
 }
@@ -839,12 +978,12 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
 }
 
 /* initialize plane */
-struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+				  enum drm_plane_type type)
 {
 	struct drm_plane *plane = NULL;
 	struct mdp5_plane *mdp5_plane;
 	int ret;
-	enum drm_plane_type type;
 
 	mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
 	if (!mdp5_plane) {
@@ -857,10 +996,16 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
 	mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
 		ARRAY_SIZE(mdp5_plane->formats), false);
 
-	type = primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
-	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
-				 mdp5_plane->formats, mdp5_plane->nformats,
-				 type, NULL);
+	if (type == DRM_PLANE_TYPE_CURSOR)
+		ret = drm_universal_plane_init(dev, plane, 0xff,
+				&mdp5_cursor_plane_funcs,
+				mdp5_plane->formats, mdp5_plane->nformats,
+				type, NULL);
+	else
+		ret = drm_universal_plane_init(dev, plane, 0xff,
+				&mdp5_plane_funcs,
+				mdp5_plane->formats, mdp5_plane->nformats,
+				type, NULL);
 	if (ret)
 		goto fail;
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 303130320748..7574cdfef418 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -112,6 +112,7 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
 #define MDP_PIPE_CAP_CSC			BIT(3)
 #define MDP_PIPE_CAP_DECIMATION			BIT(4)
 #define MDP_PIPE_CAP_SW_PIX_EXT			BIT(5)
+#define MDP_PIPE_CAP_CURSOR			BIT(6)
 
 static inline bool pipe_supports_yuv(uint32_t pipe_caps)
 {
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 30b5d23e53b4..9633a68b14d7 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -93,11 +93,6 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
 		if (!crtc->state->enable)
 			continue;
 
-		/* Legacy cursor ioctls are completely unsynced, and userspace
-		 * relies on that (by doing tons of cursor updates). */
-		if (old_state->legacy_cursor_update)
-			continue;
-
 		kms->funcs->wait_for_crtc_commit_done(kms, crtc);
 	}
 }
@@ -151,20 +146,29 @@ static void commit_worker(struct work_struct *work)
 	complete_commit(container_of(work, struct msm_commit, work), true);
 }
 
+/*
+ * This function is identical to drm_atomic_helper_check(), but we keep it
+ * because we might eventually need a more fine-grained check sequence
+ * without using the atomic helpers.
+ *
+ * In the past, we first called drm_atomic_helper_check_planes() and then
+ * drm_atomic_helper_check_modeset(). We needed this because the MDP5 plane's
+ * ->atomic_check could update ->mode_changed for pixel format changes.
+ * This, however, isn't needed now: on a pixel format change, we simply
+ * assign a new hwpipe with a new SMP allocation. We might eventually hit
+ * a condition where we would need a full modeset if we run out of planes;
+ * there, we'd probably need to set mode_changed.
+ */
 int msm_atomic_check(struct drm_device *dev,
 		     struct drm_atomic_state *state)
 {
 	int ret;
 
-	/*
-	 * msm ->atomic_check can update ->mode_changed for pixel format
-	 * changes, hence must be run before we check the modeset changes.
-	 */
-	ret = drm_atomic_helper_check_planes(dev, state);
+	ret = drm_atomic_helper_check_modeset(dev, state);
 	if (ret)
 		return ret;
 
-	ret = drm_atomic_helper_check_modeset(dev, state);
+	ret = drm_atomic_helper_check_planes(dev, state);
 	if (ret)
 		return ret;
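
One reason the order matters for this series: the MDP5 plane check added earlier clips against crtc_state->adjusted_mode, and adjusted_mode is only guaranteed to be populated once drm_atomic_helper_check_modeset() has run. The equivalent shape, sketched:

    /* same sequence drm_atomic_helper_check() uses */
    ret = drm_atomic_helper_check_modeset(dev, state);  /* fills adjusted_mode */
    if (ret)
        return ret;
    return drm_atomic_helper_check_planes(dev, state);  /* may clip against it */
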
 
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index e29bb66f55b1..70226eaa5cac 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -91,6 +91,25 @@ module_param(dumpstate, bool, 0600);
  * Util/helpers:
  */
 
+struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
+{
+	struct clk *clk;
+	char name2[32];
+
+	clk = devm_clk_get(&pdev->dev, name);
+	if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
+		return clk;
+
+	snprintf(name2, sizeof(name2), "%s_clk", name);
+
+	clk = devm_clk_get(&pdev->dev, name2);
+	if (!IS_ERR(clk))
+		dev_warn(&pdev->dev, "Using legacy clk name binding.  Use "
+				"\"%s\" instead of \"%s\"\n", name, name2);
+
+	return clk;
+}
+
 void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 		const char *dbgname)
 {
@@ -985,6 +1004,7 @@ static int add_display_components(struct device *dev,
  * as components.
  */
 static const struct of_device_id msm_gpu_match[] = {
+	{ .compatible = "qcom,adreno" },
 	{ .compatible = "qcom,adreno-3xx" },
 	{ .compatible = "qcom,kgsl-3d0" },
 	{ },
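
msm_clk_get() above lets clock names drop the redundant "_clk" suffix while old device trees keep working, warning when the legacy name is hit. Usage sketch (mirrors the msm_gpu.c hunk later in this diff):

    struct clk *core;

    core = msm_clk_get(pdev, "core");   /* tries "core", then "core_clk" */
    if (IS_ERR(core))
        core = NULL;                    /* clocks are optional here */
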
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index ed4dad3ca133..cdd7b2f8e977 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -275,16 +275,11 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
 		struct drm_encoder *encoder);
 
 struct msm_dsi;
-enum msm_dsi_encoder_id {
-	MSM_DSI_VIDEO_ENCODER_ID = 0,
-	MSM_DSI_CMD_ENCODER_ID = 1,
-	MSM_DSI_ENCODER_NUM = 2
-};
 #ifdef CONFIG_DRM_MSM_DSI
 void __init msm_dsi_register(void);
 void __exit msm_dsi_unregister(void);
 int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
-		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]);
+			 struct drm_encoder *encoder);
 #else
 static inline void __init msm_dsi_register(void)
 {
@@ -293,8 +288,8 @@ static inline void __exit msm_dsi_unregister(void)
 {
 }
 static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
-		struct drm_device *dev,
-		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+				       struct drm_device *dev,
+				       struct drm_encoder *encoder)
 {
 	return -EINVAL;
 }
@@ -318,6 +313,7 @@ static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
 static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {}
 #endif
 
+struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
 void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 		const char *dbgname);
 void msm_writel(u32 data, void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index f8a587eac6b8..6b1b375653f7 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -201,8 +201,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, helper,
-			priv->num_crtcs, priv->num_connectors);
+	ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
 	if (ret) {
 		dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
 		goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8098677a3916..c3b43f4d4f1f 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -54,8 +54,7 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
-	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
-			npages, 0, DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
 	if (ret) {
 		drm_free_large(p);
 		return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 489676568a10..1172fe7a9252 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -95,13 +95,13 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 		 */
 		submit->bos[i].flags = 0;
 
-		ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
-		if (unlikely(ret)) {
+		if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
 			pagefault_enable();
 			spin_unlock(&file->table_lock);
-			ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
-			if (ret)
+			if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
+				ret = -EFAULT;
 				goto out;
+			}
 			spin_lock(&file->table_lock);
 			pagefault_disable();
 		}
@@ -317,9 +317,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 		uint64_t iova;
 		bool valid;
 
-		ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
-		if (ret)
+		if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
+			ret = -EFAULT;
 			goto out;
+		}
 
 		if (submit_reloc.submit_offset % 4) {
 			DRM_ERROR("non-aligned reloc offset: %u\n",
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index a311d26ccb21..b654eca7636a 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -45,8 +45,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 	if (WARN_ON(drm_mm_node_allocated(&vma->node)))
 		return 0;
 
-	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
-			0, DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index b28527a65d09..99e05aacbee1 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -560,8 +560,7 @@ static irqreturn_t irq_handler(int irq, void *data)
 }
 
 static const char *clk_names[] = {
-		"core_clk", "iface_clk", "rbbmtimer_clk", "mem_clk",
-		"mem_iface_clk", "alt_mem_iface_clk",
+	"core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface",
 };
 
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
@@ -625,13 +624,13 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
 	/* Acquire clocks: */
 	for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
-		gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
+		gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]);
 		DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
 		if (IS_ERR(gpu->grp_clks[i]))
 			gpu->grp_clks[i] = NULL;
 	}
 
-	gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
 	DBG("ebi1_clk: %p", gpu->ebi1_clk);
 	if (IS_ERR(gpu->ebi1_clk))
 		gpu->ebi1_clk = NULL;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 61aaaa1de6eb..7f5779daf5c8 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -24,9 +24,12 @@ struct msm_iommu {
 };
 #define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
 
-static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
+static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
 		unsigned long iova, int flags, void *arg)
 {
+	struct msm_iommu *iommu = arg;
+	if (iommu->base.handler)
+		return iommu->base.handler(iommu->base.arg, iova, flags);
 	pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
 	return 0;
 }
@@ -136,7 +139,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
 
 	iommu->domain = domain;
 	msm_mmu_init(&iommu->base, dev, &funcs);
-	iommu_set_fault_handler(domain, msm_fault_handler, dev);
+	iommu_set_fault_handler(domain, msm_fault_handler, iommu);
 
 	return &iommu->base;
 }
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e470f4cf8f76..117635d2b8c5 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -56,6 +56,9 @@ struct msm_kms_funcs {
 			struct drm_encoder *encoder,
 			struct drm_encoder *slave_encoder,
 			bool is_cmd_mode);
+	void (*set_encoder_mode)(struct msm_kms *kms,
+				 struct drm_encoder *encoder,
+				 bool cmd_mode);
 	/* cleanup: */
 	void (*destroy)(struct msm_kms *kms);
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index f85c879e68d2..aa2c5d4580c8 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -33,6 +33,8 @@ struct msm_mmu_funcs {
 struct msm_mmu {
 	const struct msm_mmu_funcs *funcs;
 	struct device *dev;
+	int (*handler)(void *arg, unsigned long iova, int flags);
+	void *arg;
 };
 
 static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
@@ -45,4 +47,11 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
 struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
 
+static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
+		int (*handler)(void *arg, unsigned long iova, int flags))
+{
+	mmu->arg = arg;
+	mmu->handler = handler;
+}
+
 #endif /* __MSM_MMU_H__ */
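
The handler/arg pair added to struct msm_mmu, together with passing the msm_iommu itself as the fault-handler cookie, lets a consumer intercept IOMMU faults before the generic ratelimited warning fires. A hedged consumer sketch (the GPU-side function is an assumption; only msm_mmu_set_fault_handler() is from this patch):

    static int gpu_fault_handler(void *arg, unsigned long iova, int flags)
    {
        struct msm_gpu *gpu = arg;      /* assumed consumer context */

        /* inspect/record the faulting iova; the return value is
         * propagated back through report_iommu_fault() */
        dev_err(gpu->dev->dev, "gpu fault at iova %08lx\n", iova);
        return 0;
    }

    /* registration, e.g. during GPU init: */
    msm_mmu_set_fault_handler(mmu, gpu, gpu_fault_handler);
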
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 955441f71500..cdfbe0284635 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -218,7 +218,7 @@ static int mxsfb_load(struct drm_device *drm, unsigned long flags)
 
 	drm_kms_helper_poll_init(drm);
 
-	mxsfb->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+	mxsfb->fbdev = drm_fbdev_cma_init(drm, 32,
 					  drm->mode_config.num_connector);
 	if (IS_ERR(mxsfb->fbdev)) {
 		mxsfb->fbdev = NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9de6abb65781..971c147a3984 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -507,8 +507,7 @@ nouveau_fbcon_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, &fbcon->helper,
-				 dev->mode_config.num_crtc, 4);
+	ret = drm_fb_helper_init(dev, &fbcon->helper, 4);
 	if (ret)
 		goto free;
 
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 2a839956dae6..942c4d483008 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -263,8 +263,7 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, helper,
-			priv->num_crtcs, priv->num_connectors);
+	ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
 	if (ret) {
 		dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
 		goto fail;
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 057b2b547cac..d58751c94618 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -133,8 +133,8 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
 	qdev->debugfs_count = i;
 #if defined(CONFIG_DEBUG_FS)
 	drm_debugfs_create_files(files, nfiles,
-				 qdev->ddev->primary->debugfs_root,
-				 qdev->ddev->primary);
+				 qdev->ddev.primary->debugfs_root,
+				 qdev->ddev.primary);
 #endif
 	return 0;
 }
@@ -147,7 +147,7 @@ void qxl_debugfs_remove_files(struct qxl_device *qdev)
 	for (i = 0; i < qdev->debugfs_count; i++) {
 		drm_debugfs_remove_files(qdev->debugfs[i].files,
 					 qdev->debugfs[i].num_files,
-					 qdev->ddev->primary);
+					 qdev->ddev.primary);
 	}
 #endif
 }
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 416ade8566b7..1094cd33eb06 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -136,7 +136,7 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
 
 static void qxl_update_offset_props(struct qxl_device *qdev)
 {
-	struct drm_device *dev = qdev->ddev;
+	struct drm_device *dev = &qdev->ddev;
 	struct drm_connector *connector;
 	struct qxl_output *output;
 	struct qxl_head *head;
@@ -156,7 +156,7 @@ static void qxl_update_offset_props(struct qxl_device *qdev)
 void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
 {
 
-	struct drm_device *dev = qdev->ddev;
+	struct drm_device *dev = &qdev->ddev;
 	int status;
 
 	status = qxl_display_copy_rom_client_monitors_config(qdev);
@@ -174,10 +174,10 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
 	drm_modeset_lock_all(dev);
 	qxl_update_offset_props(qdev);
 	drm_modeset_unlock_all(dev);
-	if (!drm_helper_hpd_irq_event(qdev->ddev)) {
+	if (!drm_helper_hpd_irq_event(dev)) {
 		/* notify that the monitor configuration changed, so the
 		   client can adjust to the new resolution */
-		drm_kms_helper_hotplug_event(qdev->ddev);
+		drm_kms_helper_hotplug_event(dev);
 	}
 }
 
@@ -1036,7 +1036,7 @@ static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
 		return 0;
 
 	qdev->hotplug_mode_update_property =
-		drm_property_create_range(qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
+		drm_property_create_range(&qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
 					  "hotplug_mode_update", 0, 1);
 
 	return 0;
@@ -1175,28 +1175,28 @@ int qxl_modeset_init(struct qxl_device *qdev)
 	int i;
 	int ret;
 
-	drm_mode_config_init(qdev->ddev);
+	drm_mode_config_init(&qdev->ddev);
 
 	ret = qxl_create_monitors_object(qdev);
 	if (ret)
 		return ret;
 
-	qdev->ddev->mode_config.funcs = (void *)&qxl_mode_funcs;
+	qdev->ddev.mode_config.funcs = (void *)&qxl_mode_funcs;
 
 	/* modes will be validated against the framebuffer size */
-	qdev->ddev->mode_config.min_width = 320;
-	qdev->ddev->mode_config.min_height = 200;
-	qdev->ddev->mode_config.max_width = 8192;
-	qdev->ddev->mode_config.max_height = 8192;
+	qdev->ddev.mode_config.min_width = 320;
+	qdev->ddev.mode_config.min_height = 200;
+	qdev->ddev.mode_config.max_width = 8192;
+	qdev->ddev.mode_config.max_height = 8192;
 
-	qdev->ddev->mode_config.fb_base = qdev->vram_base;
+	qdev->ddev.mode_config.fb_base = qdev->vram_base;
 
-	drm_mode_create_suggested_offset_properties(qdev->ddev);
+	drm_mode_create_suggested_offset_properties(&qdev->ddev);
 	qxl_mode_create_hotplug_mode_update_property(qdev);
 
 	for (i = 0 ; i < qxl_num_crtc; ++i) {
-		qdev_crtc_init(qdev->ddev, i);
-		qdev_output_init(qdev->ddev, i);
+		qdev_crtc_init(&qdev->ddev, i);
+		qdev_output_init(&qdev->ddev, i);
 	}
 
 	qdev->mode_info.mode_config_initialized = true;
@@ -1214,7 +1214,7 @@ void qxl_modeset_fini(struct qxl_device *qdev)
 
 	qxl_destroy_monitors_object(qdev);
 	if (qdev->mode_info.mode_config_initialized) {
-		drm_mode_config_cleanup(qdev->ddev);
+		drm_mode_config_cleanup(&qdev->ddev);
 		qdev->mode_info.mode_config_initialized = false;
 	}
 }
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 6e0f8a2d8ac9..8e17c241e63c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -62,7 +62,6 @@ static struct pci_driver qxl_pci_driver;
 static int
 qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	struct drm_device *drm;
 	struct qxl_device *qdev;
 	int ret;
 
@@ -72,29 +71,19 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return -EINVAL; /* TODO: ENODEV ? */
 	}
 
-	drm = drm_dev_alloc(&qxl_driver, &pdev->dev);
-	if (IS_ERR(drm))
-		return -ENOMEM;
-
 	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
-	if (!qdev) {
-		ret = -ENOMEM;
-		goto free_drm_device;
-	}
+	if (!qdev)
+		return -ENOMEM;
 
 	ret = pci_enable_device(pdev);
 	if (ret)
-		goto free_drm_device;
-
-	drm->pdev = pdev;
-	pci_set_drvdata(pdev, drm);
-	drm->dev_private = qdev;
+		goto free_dev;
 
-	ret = qxl_device_init(qdev, drm, pdev, ent->driver_data);
+	ret = qxl_device_init(qdev, &qxl_driver, pdev, ent->driver_data);
 	if (ret)
 		goto disable_pci;
 
-	ret = drm_vblank_init(drm, 1);
+	ret = drm_vblank_init(&qdev->ddev, 1);
 	if (ret)
 		goto unload;
 
@@ -102,10 +91,10 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		goto vblank_cleanup;
 
-	drm_kms_helper_poll_init(qdev->ddev);
+	drm_kms_helper_poll_init(&qdev->ddev);
 
 	/* Complete initialization. */
-	ret = drm_dev_register(drm, ent->driver_data);
+	ret = drm_dev_register(&qdev->ddev, ent->driver_data);
 	if (ret)
 		goto modeset_cleanup;
 
@@ -114,14 +103,13 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 modeset_cleanup:
 	qxl_modeset_fini(qdev);
 vblank_cleanup:
-	drm_vblank_cleanup(drm);
+	drm_vblank_cleanup(&qdev->ddev);
 unload:
 	qxl_device_fini(qdev);
 disable_pci:
 	pci_disable_device(pdev);
-free_drm_device:
+free_dev:
 	kfree(qdev);
-	kfree(drm);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 0d877fa61162..785c17b56f73 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -242,9 +242,7 @@ void qxl_debugfs_remove_files(struct qxl_device *qdev);
 struct qxl_device;
 
 struct qxl_device {
-	struct device			*dev;
-	struct drm_device		*ddev;
-	struct pci_dev			*pdev;
+	struct drm_device ddev;
 	unsigned long flags;
 
 	resource_size_t vram_base, vram_size;
@@ -336,8 +334,8 @@ __printf(2,3) void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
 extern const struct drm_ioctl_desc qxl_ioctls[];
 extern int qxl_max_ioctl;
 
-int qxl_device_init(struct qxl_device *qdev, struct drm_device *ddev,
-		    struct pci_dev *pdev,  unsigned long flags);
+int qxl_device_init(struct qxl_device *qdev, struct drm_driver *drv,
+		    struct pci_dev *pdev, unsigned long flags);
 void qxl_device_fini(struct qxl_device *qdev);
 
 int qxl_modeset_init(struct qxl_device *qdev);
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index e6ade6aab54c..d479b7a7abe4 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -268,7 +268,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 
 	info->par = qfbdev;
 
-	qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj,
+	qxl_framebuffer_init(&qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj,
 			     &qxlfb_fb_funcs);
 
 	fb = &qfbdev->qfb.base;
@@ -297,7 +297,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 			       sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
+	info->apertures->ranges[0].base = qdev->ddev.mode_config.fb_base;
 	info->apertures->ranges[0].size = qdev->vram_size;
 
 	info->fix.mmio_start = 0;
@@ -395,11 +395,10 @@ int qxl_fbdev_init(struct qxl_device *qdev)
 	spin_lock_init(&qfbdev->delayed_ops_lock);
 	INIT_LIST_HEAD(&qfbdev->delayed_ops);
 
-	drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
+	drm_fb_helper_prepare(&qdev->ddev, &qfbdev->helper,
 			      &qxl_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
-				 qxl_num_crtc /* num_crtc - QXL supports just 1 */,
+	ret = drm_fb_helper_init(&qdev->ddev, &qfbdev->helper,
 				 QXLFB_CONN_LIMIT);
 	if (ret)
 		goto free;
@@ -426,7 +425,7 @@ void qxl_fbdev_fini(struct qxl_device *qdev)
 	if (!qdev->mode_info.qfbdev)
 		return;
 
-	qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
+	qxl_fbdev_destroy(&qdev->ddev, qdev->mode_info.qfbdev);
 	kfree(qdev->mode_info.qfbdev);
 	qdev->mode_info.qfbdev = NULL;
 }
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 5a4c8c492683..0b82a87916ae 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -64,7 +64,7 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data,
 	struct qxl_device *qdev = dev->dev_private;
 	struct drm_qxl_map *qxl_map = data;
 
-	return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
+	return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
 				  &qxl_map->offset);
 }
 
@@ -375,7 +375,7 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
 	byte = param->index / 8;
 	idx = param->index % 8;
 
-	if (qdev->pdev->revision < 4)
+	if (dev->pdev->revision < 4)
 		return -ENOSYS;
 
 	if (byte >= 58)
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 0bf1e20c6e44..23a40106ab53 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -90,7 +90,7 @@ int qxl_irq_init(struct qxl_device *qdev)
 	atomic_set(&qdev->irq_received_cursor, 0);
 	atomic_set(&qdev->irq_received_io_cmd, 0);
 	qdev->irq_received_error = 0;
-	ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
+	ret = drm_irq_install(&qdev->ddev, qdev->ddev.pdev->irq);
 	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed installing irq: %d\n", ret);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index d0666f5dccd6..2dcd5c14cb56 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -116,15 +116,20 @@ static void qxl_gc_work(struct work_struct *work)
 }
 
 int qxl_device_init(struct qxl_device *qdev,
-		    struct drm_device *ddev,
+		    struct drm_driver *drv,
 		    struct pci_dev *pdev,
 		    unsigned long flags)
 {
 	int r, sb;
 
-	qdev->dev = &pdev->dev;
-	qdev->ddev = ddev;
-	qdev->pdev = pdev;
+	r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
+	if (r)
+		return r;
+
+	qdev->ddev.pdev = pdev;
+	pci_set_drvdata(pdev, &qdev->ddev);
+	qdev->ddev.dev_private = qdev;
+
 	qdev->flags = flags;
 
 	mutex_init(&qdev->gem.mutex);
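
qxl now embeds its drm_device (drm_dev_init() into qdev->ddev) instead of pairing a drm_dev_alloc()'d device with a separately kzalloc'd qxl_device. The patch keeps dev_private pointing back at qdev; with the embedded layout, a container_of() helper would also work (a sketch of the idiom, not something this diff adds):

    static inline struct qxl_device *to_qxl(struct drm_device *dev)
    {
        return container_of(dev, struct qxl_device, ddev);
    }
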
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index fa5440dc9a19..dbc13510a1f8 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -93,7 +93,7 @@ int qxl_bo_create(struct qxl_device *qdev,
 	if (bo == NULL)
 		return -ENOMEM;
 	size = roundup(size, PAGE_SIZE);
-	r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
+	r = drm_gem_object_init(&qdev->ddev, &bo->gem_base, size);
 	if (unlikely(r)) {
 		kfree(bo);
 		return r;
@@ -113,7 +113,7 @@ int qxl_bo_create(struct qxl_device *qdev,
 			NULL, NULL, &qxl_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
-			dev_err(qdev->dev,
+			dev_err(qdev->ddev.dev,
 				"object_init failed for (%lu, 0x%08X)\n",
 				size, domain);
 		return r;
@@ -223,7 +223,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
 
 int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
 {
-	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+	struct drm_device *ddev = bo->gem_base.dev;
 	int r;
 
 	if (bo->pin_count) {
@@ -240,17 +240,17 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
 			*gpu_addr = qxl_bo_gpu_offset(bo);
 	}
 	if (unlikely(r != 0))
-		dev_err(qdev->dev, "%p pin failed\n", bo);
+		dev_err(ddev->dev, "%p pin failed\n", bo);
 	return r;
 }
 
 int qxl_bo_unpin(struct qxl_bo *bo)
 {
-	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+	struct drm_device *ddev = bo->gem_base.dev;
 	int r, i;
 
 	if (!bo->pin_count) {
-		dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
+		dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
 		return 0;
 	}
 	bo->pin_count--;
@@ -260,7 +260,7 @@ int qxl_bo_unpin(struct qxl_bo *bo)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (unlikely(r != 0))
-		dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
+		dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
 	return r;
 }
 
@@ -270,9 +270,9 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
 
 	if (list_empty(&qdev->gem.objects))
 		return;
-	dev_err(qdev->dev, "Userspace still has active objects !\n");
+	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
 	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
-		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
+		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
 			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
 			*((unsigned long *)&bo->gem_base.refcount));
 		mutex_lock(&qdev->gem.mutex);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 4d8311373ba3..0374fd93f4d6 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -34,8 +34,8 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
 	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS) {
-			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
-			dev_err(qdev->dev, "%p reserve failed\n", bo);
+			struct drm_device *ddev = bo->gem_base.dev;
+			dev_err(ddev->dev, "%p reserve failed\n", bo);
 		}
 		return r;
 	}
@@ -70,8 +70,8 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
 	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS) {
-			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
-			dev_err(qdev->dev, "%p reserve failed for wait\n",
+			struct drm_device *ddev = bo->gem_base.dev;
+			dev_err(ddev->dev, "%p reserve failed for wait\n",
 				bo);
 		}
 		return r;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 3dcc48431015..4e1a40389964 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -408,7 +408,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
 	r = ttm_bo_device_init(&qdev->mman.bdev,
 			       qdev->mman.bo_global_ref.ref.object,
 			       &qxl_bo_driver,
-			       qdev->ddev->anon_inode->i_mapping,
+			       qdev->ddev.anon_inode->i_mapping,
 			       DRM_FILE_PAGE_OFFSET, 0);
 	if (r) {
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 00cfb5d2875f..04c0ed41374f 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -638,10 +638,8 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
 					     vhdr->ImageLength,
 					     GFP_KERNEL);
 
-			if (!rdev->bios) {
-				kfree(rdev->bios);
+			if (!rdev->bios)
 				return false;
-			}
 			return true;
 		}
 	}
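
The deleted branch above freed a pointer it had just tested to be NULL; since kfree(NULL) is a no-op, the call was dead code and only the early return mattered. The fixed shape, sketched (the allocation's source pointer is not visible in this hunk, so vbios_image stands in for it):

    rdev->bios = kmemdup(vbios_image, vhdr->ImageLength, GFP_KERNEL);
    if (!rdev->bios)
        return false;
    return true;
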
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 6c10a83f3362..2be4fe9c7217 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -366,7 +366,6 @@ int radeon_fbdev_init(struct radeon_device *rdev)
 			      &radeon_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
-				 rdev->num_crtc,
 				 RADEONFB_CONN_LIMIT);
 	if (ret)
 		goto free;
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
index a01efe39a820..f541a4b5ac51 100644
--- a/drivers/gpu/drm/radeon/vce_v1_0.c
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -196,7 +196,7 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
 	memset(&data[5], 0, 44);
 	memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
 
-	data += le32_to_cpu(data[4]) / 4;
+	data += (le32_to_cpu(sign->len) + 64) / 4;
 	data[0] = sign->val[i].sigval[0];
 	data[1] = sign->val[i].sigval[1];
 	data[2] = sign->val[i].sigval[2];
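The fix above stops trusting an offset stored inside the image itself (the old data[4] read) and derives the signature slot from the signature header instead: sign->len bytes of signed body plus a 64-byte header, converted to u32 units because data is a u32 cursor. A standalone restatement of the arithmetic (the 64-byte header size comes from the new line; the rest of the layout is assumed):

#include <stdint.h>

/* Word offset of the signature slot: a 64-byte header followed by
 * body_len bytes of payload, addressed on a uint32_t cursor. */
static inline uint32_t sig_word_offset(uint32_t body_len)
{
	return (body_len + 64) / 4;	/* bytes -> u32 words */
}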
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index b5d3f16cfa12..ff61f6032f2c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -662,7 +662,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 	drm_kms_helper_poll_init(dev);
 
 	if (dev->mode_config.num_connector) {
-		fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
+		fbdev = drm_fbdev_cma_init(dev, 32,
 					   dev->mode_config.num_connector);
 		if (IS_ERR(fbdev))
 			return PTR_ERR(fbdev);
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 6f7f9c59f05b..ad31b3eb408f 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -21,6 +21,16 @@ config ROCKCHIP_ANALOGIX_DP
 	  for the Analogix Core DP driver. If you want to enable DP
 	  on RK3288 based SoC, you should selet this option.
 
+config ROCKCHIP_CDN_DP
+        tristate "Rockchip cdn DP"
+        depends on DRM_ROCKCHIP
+        select SND_SOC_HDMI_CODEC if SND_SOC
+        help
+	  This selects support for the Rockchip SoC specific
+	  extensions for the cdn DP driver. If you want to
+	  enable DP on RK3399 based SoCs, you should select
+	  this option.
+
 config ROCKCHIP_DW_HDMI
         tristate "Rockchip specific extensions for Synopsys DW HDMI"
         depends on DRM_ROCKCHIP
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 9746365694ba..c931e2a7d8de 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -7,6 +7,8 @@ rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
 rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
 
 obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
+obj-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp.o
+cdn-dp-objs := cdn-dp-core.o cdn-dp-reg.o
 obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
 obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
 obj-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
new file mode 100644
index 000000000000..9ab67a670885
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -0,0 +1,1260 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Chris Zhong <zyw@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/extcon.h>
+#include <linux/firmware.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+
+#include <sound/hdmi-codec.h>
+
+#include "cdn-dp-core.h"
+#include "cdn-dp-reg.h"
+#include "rockchip_drm_vop.h"
+
+#define connector_to_dp(c) \
+		container_of(c, struct cdn_dp_device, connector)
+
+#define encoder_to_dp(c) \
+		container_of(c, struct cdn_dp_device, encoder)
+
+#define GRF_SOC_CON9		0x6224
+#define DP_SEL_VOP_LIT		BIT(12)
+#define GRF_SOC_CON26		0x6268
+#define UPHY_SEL_BIT		3
+#define UPHY_SEL_MASK		BIT(19)
+#define DPTX_HPD_SEL		(3 << 12)
+#define DPTX_HPD_DEL		(2 << 12)
+#define DPTX_HPD_SEL_MASK	(3 << 28)
+
+#define CDN_FW_TIMEOUT_MS	(64 * 1000)
+#define CDN_DPCD_TIMEOUT_MS	5000
+#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"
+
+struct cdn_dp_data {
+	u8 max_phy;
+};
+
+struct cdn_dp_data rk3399_cdn_dp = {
+	.max_phy = 2,
+};
+
+static const struct of_device_id cdn_dp_dt_ids[] = {
+	{ .compatible = "rockchip,rk3399-cdn-dp",
+		.data = (void *)&rk3399_cdn_dp },
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);
+
+static int cdn_dp_grf_write(struct cdn_dp_device *dp,
+			    unsigned int reg, unsigned int val)
+{
+	int ret;
+
+	ret = clk_prepare_enable(dp->grf_clk);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
+		return ret;
+	}
+
+	ret = regmap_write(dp->grf, reg, val);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
+		clk_disable_unprepare(dp->grf_clk);
+		return ret;
+	}
+
+	clk_disable_unprepare(dp->grf_clk);
+
+	return 0;
+}
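cdn_dp_grf_write() programs a Rockchip GRF (General Register File) register through the regmap. These registers take the upper 16 bits of each write as a write-enable mask: only bits whose mask bit is set actually change, which is why callers pass values such as DPTX_HPD_SEL_MASK | DPTX_HPD_SEL or (port->id << UPHY_SEL_BIT) | UPHY_SEL_MASK. A standalone sketch of the convention (helper name hypothetical, not part of the patch):

#include <stdint.h>

/* Compose a Rockchip GRF write: the high half-word write-enables the
 * bits being changed, the low half-word carries their new values. */
static inline uint32_t grf_write_val(uint16_t mask, uint16_t value)
{
	return ((uint32_t)mask << 16) | (value & mask);
}

/* Example: grf_write_val(0x3 << 12, 0x3 << 12) reproduces
 * DPTX_HPD_SEL_MASK | DPTX_HPD_SEL from the defines above. */

cdn_dp_encoder_enable() below uses the same convention on GRF_SOC_CON9 when routing the big or little VOP to the DP controller.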
+
+static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
+{
+	int ret;
+	u32 rate;
+
+	ret = clk_prepare_enable(dp->pclk);
+	if (ret < 0) {
+		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
+		goto err_pclk;
+	}
+
+	ret = clk_prepare_enable(dp->core_clk);
+	if (ret < 0) {
+		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
+		goto err_core_clk;
+	}
+
+	ret = pm_runtime_get_sync(dp->dev);
+	if (ret < 0) {
+		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
+		goto err_pm_runtime_get;
+	}
+
+	reset_control_assert(dp->core_rst);
+	reset_control_assert(dp->dptx_rst);
+	reset_control_assert(dp->apb_rst);
+	reset_control_deassert(dp->core_rst);
+	reset_control_deassert(dp->dptx_rst);
+	reset_control_deassert(dp->apb_rst);
+
+	rate = clk_get_rate(dp->core_clk);
+	if (!rate) {
+		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
+		ret = -EINVAL;
+		goto err_set_rate;
+	}
+
+	cdn_dp_set_fw_clk(dp, rate);
+	cdn_dp_clock_reset(dp);
+
+	return 0;
+
+err_set_rate:
+	pm_runtime_put(dp->dev);
+err_pm_runtime_get:
+	clk_disable_unprepare(dp->core_clk);
+err_core_clk:
+	clk_disable_unprepare(dp->pclk);
+err_pclk:
+	return ret;
+}
+
+static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
+{
+	pm_runtime_put_sync(dp->dev);
+	clk_disable_unprepare(dp->pclk);
+	clk_disable_unprepare(dp->core_clk);
+}
+
+static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
+{
+	struct extcon_dev *edev = port->extcon;
+	union extcon_property_value property;
+	int dptx;
+	u8 lanes;
+
+	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
+	if (dptx > 0) {
+		extcon_get_property(edev, EXTCON_DISP_DP,
+				    EXTCON_PROP_USB_SS, &property);
+		if (property.intval)
+			lanes = 2;
+		else
+			lanes = 4;
+	} else {
+		lanes = 0;
+	}
+
+	return lanes;
+}
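The lane count above encodes the USB Type-C alternate-mode trade-off: the connector carries four high-speed lane pairs, and when the extcon device reports EXTCON_PROP_USB_SS (taken here to mean SuperSpeed USB is sharing the connector) DP is left with two of them. A standalone restatement (helper name hypothetical):

#include <stdint.h>

/* USB Type-C lane budget for DP alternate mode: 4 lanes when DP owns the
 * whole connector, 2 when SuperSpeed USB shares it, 0 when detached. */
static uint8_t dp_lane_budget(int dp_attached, int usb_ss_active)
{
	if (!dp_attached)
		return 0;
	return usb_ss_active ? 2 : 4;
}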
+
+static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
+{
+	int ret;
+	u8 value;
+
+	*sink_count = 0;
+	ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
+	if (ret)
+		return ret;
+
+	*sink_count = DP_GET_SINK_COUNT(value);
+	return 0;
+}
+
+static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
+{
+	struct cdn_dp_port *port;
+	int i, lanes;
+
+	for (i = 0; i < dp->ports; i++) {
+		port = dp->port[i];
+		lanes = cdn_dp_get_port_lanes(port);
+		if (lanes)
+			return port;
+	}
+	return NULL;
+}
+
+static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
+	struct cdn_dp_port *port;
+	u8 sink_count = 0;
+
+	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
+		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
+		return false;
+	}
+
+	port = dp->port[dp->active_port];
+
+	/*
+	 * Attempt to read sink count, retry in case the sink may not be ready.
+	 *
+	 * Sinks are *supposed* to come up within 1ms from an off state, but
+	 * some docks need more time to power up.
+	 */
+	while (time_before(jiffies, timeout)) {
+		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
+			return false;
+
+		if (!cdn_dp_get_sink_count(dp, &sink_count))
+			return sink_count ? true : false;
+
+		usleep_range(5000, 10000);
+	}
+
+	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
+	return false;
+}
+
+static enum drm_connector_status
+cdn_dp_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct cdn_dp_device *dp = connector_to_dp(connector);
+	enum drm_connector_status status = connector_status_disconnected;
+
+	mutex_lock(&dp->lock);
+	if (dp->connected)
+		status = connector_status_connected;
+	mutex_unlock(&dp->lock);
+
+	return status;
+}
+
+static void cdn_dp_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = cdn_dp_connector_detect,
+	.destroy = cdn_dp_connector_destroy,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int cdn_dp_connector_get_modes(struct drm_connector *connector)
+{
+	struct cdn_dp_device *dp = connector_to_dp(connector);
+	struct edid *edid;
+	int ret = 0;
+
+	mutex_lock(&dp->lock);
+	edid = dp->edid;
+	if (edid) {
+		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
+				  edid->width_cm, edid->height_cm);
+
+		dp->sink_has_audio = drm_detect_monitor_audio(edid);
+		ret = drm_add_edid_modes(connector, edid);
+		if (ret) {
+			drm_mode_connector_update_edid_property(connector,
+								edid);
+			drm_edid_to_eld(connector, edid);
+		}
+	}
+	mutex_unlock(&dp->lock);
+
+	return ret;
+}
+
+static struct drm_encoder *
+cdn_dp_connector_best_encoder(struct drm_connector *connector)
+{
+	struct cdn_dp_device *dp = connector_to_dp(connector);
+
+	return &dp->encoder;
+}
+
+static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
+				       struct drm_display_mode *mode)
+{
+	struct cdn_dp_device *dp = connector_to_dp(connector);
+	struct drm_display_info *display_info = &dp->connector.display_info;
+	u32 requested, actual, rate, sink_max, source_max = 0;
+	u8 lanes, bpc;
+
+	/* If DP is disconnected, every mode is invalid */
+	if (!dp->connected)
+		return MODE_BAD;
+
+	switch (display_info->bpc) {
+	case 10:
+		bpc = 10;
+		break;
+	case 6:
+		bpc = 6;
+		break;
+	default:
+		bpc = 8;
+		break;
+	}
+
+	requested = mode->clock * bpc * 3 / 1000;
+
+	source_max = dp->lanes;
+	sink_max = drm_dp_max_lane_count(dp->dpcd);
+	lanes = min(source_max, sink_max);
+
+	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
+	sink_max = drm_dp_max_link_rate(dp->dpcd);
+	rate = min(source_max, sink_max);
+
+	actual = rate * lanes / 100;
+
+	/* 8b/10b channel coding: usable bandwidth is about 0.8 of raw */
+	actual = actual * 8 / 10;
+
+	if (requested > actual) {
+		DRM_DEV_DEBUG_KMS(dp->dev,
+				  "requested=%d, actual=%d, clock=%d\n",
+				  requested, actual, mode->clock);
+		return MODE_CLOCK_HIGH;
+	}
+
+	return MODE_OK;
+}
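mode_valid boils down to one bandwidth comparison. With mode->clock in kHz, requested = clock * bpc * 3 / 1000 is the payload rate in Mbit/s; rate * lanes / 100 converts the kHz link-symbol clock (10 bits per symbol on the wire) into raw Mbit/s; and the final * 8 / 10 strips the 8b/10b coding overhead. A standalone version with a worked example (the numbers are illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>

/* Standalone restatement of the bandwidth check above. Units follow the
 * driver: mode clock in kHz, link rate in kHz as returned by
 * drm_dp_bw_code_to_link_rate(). */
static int mode_fits(uint32_t clock_khz, uint32_t bpc,
		     uint32_t rate_khz, uint32_t lanes)
{
	uint32_t requested = clock_khz * bpc * 3 / 1000; /* payload Mbit/s */
	uint32_t actual = rate_khz * lanes / 100;	 /* raw wire Mbit/s */

	actual = actual * 8 / 10;	/* strip 8b/10b coding overhead */
	return requested <= actual;
}

int main(void)
{
	/* 3840x2160@60 (594000 kHz) at 8 bpc over 4 lanes of HBR2
	 * (540000 kHz): 14256 Mbit/s requested vs 17280 available. */
	printf("%d\n", mode_fits(594000, 8, 540000, 4));
	return 0;
}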
+
+static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
+	.get_modes = cdn_dp_connector_get_modes,
+	.best_encoder = cdn_dp_connector_best_encoder,
+	.mode_valid = cdn_dp_connector_mode_valid,
+};
+
+static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
+{
+	int ret;
+	const u32 *iram_data, *dram_data;
+	const struct firmware *fw = dp->fw;
+	const struct cdn_firmware_header *hdr;
+
+	hdr = (struct cdn_firmware_header *)fw->data;
+	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
+		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
+		return -EINVAL;
+	}
+
+	iram_data = (const u32 *)(fw->data + hdr->header_size);
+	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);
+
+	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
+				   dram_data, hdr->dram_size);
+	if (ret)
+		return ret;
+
+	ret = cdn_dp_set_firmware_active(dp, true);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
+		return ret;
+	}
+
+	return cdn_dp_event_config(dp);
+}
+
+static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
+{
+	int ret;
+
+	if (!cdn_dp_check_sink_connection(dp))
+		return -ENODEV;
+
+	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
+			       DP_RECEIVER_CAP_SIZE);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
+		return ret;
+	}
+
+	kfree(dp->edid);
+	dp->edid = drm_do_get_edid(&dp->connector,
+				   cdn_dp_get_edid_block, dp);
+	return 0;
+}
+
+static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
+{
+	union extcon_property_value property;
+	int ret;
+
+	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
+			       (port->id << UPHY_SEL_BIT) | UPHY_SEL_MASK);
+	if (ret)
+		return ret;
+
+	if (!port->phy_enabled) {
+		ret = phy_power_on(port->phy);
+		if (ret) {
+			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
+				      ret);
+			goto err_phy;
+		}
+		port->phy_enabled = true;
+	}
+
+	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
+			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
+		goto err_power_on;
+	}
+
+	ret = cdn_dp_get_hpd_status(dp);
+	if (ret <= 0) {
+		if (!ret)
+			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
+		goto err_power_on;
+	}
+
+	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
+				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "get property failed\n");
+		goto err_power_on;
+	}
+
+	port->lanes = cdn_dp_get_port_lanes(port);
+	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
+			      ret);
+		goto err_power_on;
+	}
+
+	dp->active_port = port->id;
+	return 0;
+
+err_power_on:
+	if (phy_power_off(port->phy))
+		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
+	else
+		port->phy_enabled = false;
+
+err_phy:
+	cdn_dp_grf_write(dp, GRF_SOC_CON26,
+			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
+	return ret;
+}
+
+static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
+			      struct cdn_dp_port *port)
+{
+	int ret;
+
+	if (port->phy_enabled) {
+		ret = phy_power_off(port->phy);
+		if (ret) {
+			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
+			return ret;
+		}
+	}
+
+	port->phy_enabled = false;
+	port->lanes = 0;
+	dp->active_port = -1;
+	return 0;
+}
+
+static int cdn_dp_disable(struct cdn_dp_device *dp)
+{
+	int ret, i;
+
+	if (!dp->active)
+		return 0;
+
+	for (i = 0; i < dp->ports; i++)
+		cdn_dp_disable_phy(dp, dp->port[i]);
+
+	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
+			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
+			      ret);
+		return ret;
+	}
+
+	cdn_dp_set_firmware_active(dp, false);
+	cdn_dp_clk_disable(dp);
+	dp->active = false;
+	dp->link.rate = 0;
+	dp->link.num_lanes = 0;
+	if (!dp->connected) {
+		kfree(dp->edid);
+		dp->edid = NULL;
+	}
+
+	return 0;
+}
+
+static int cdn_dp_enable(struct cdn_dp_device *dp)
+{
+	int ret, i, lanes;
+	struct cdn_dp_port *port;
+
+	port = cdn_dp_connected_port(dp);
+	if (!port) {
+		DRM_DEV_ERROR(dp->dev,
+			      "Can't enable without connection\n");
+		return -ENODEV;
+	}
+
+	if (dp->active)
+		return 0;
+
+	ret = cdn_dp_clk_enable(dp);
+	if (ret)
+		return ret;
+
+	ret = cdn_dp_firmware_init(dp);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
+		goto err_clk_disable;
+	}
+
+	/* only enable the port that is connected to a downstream device */
+	for (i = port->id; i < dp->ports; i++) {
+		port = dp->port[i];
+		lanes = cdn_dp_get_port_lanes(port);
+		if (lanes) {
+			ret = cdn_dp_enable_phy(dp, port);
+			if (ret)
+				continue;
+
+			ret = cdn_dp_get_sink_capability(dp);
+			if (ret) {
+				cdn_dp_disable_phy(dp, port);
+			} else {
+				dp->active = true;
+				dp->lanes = port->lanes;
+				return 0;
+			}
+		}
+	}
+
+err_clk_disable:
+	cdn_dp_clk_disable(dp);
+	return ret;
+}
+
+static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted)
+{
+	struct cdn_dp_device *dp = encoder_to_dp(encoder);
+	struct drm_display_info *display_info = &dp->connector.display_info;
+	struct video_info *video = &dp->video_info;
+
+	switch (display_info->bpc) {
+	case 10:
+		video->color_depth = 10;
+		break;
+	case 6:
+		video->color_depth = 6;
+		break;
+	default:
+		video->color_depth = 8;
+		break;
+	}
+
+	video->color_fmt = PXL_RGB;
+	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+
+	memcpy(&dp->mode, adjusted, sizeof(*mode));
+}
+
+static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
+{
+	u8 link_status[DP_LINK_STATUS_SIZE];
+	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
+	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);
+
+	if (!port || !dp->link.rate || !dp->link.num_lanes)
+		return false;
+
+	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
+			     DP_LINK_STATUS_SIZE)) {
+		DRM_ERROR("Failed to get link status\n");
+		return false;
+	}
+
+	/* if link training is requested we should perform it always */
+	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
+}
+
+static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
+{
+	struct cdn_dp_device *dp = encoder_to_dp(encoder);
+	int ret, val;
+	struct rockchip_crtc_state *state;
+
+	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+	if (ret < 0) {
+		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
+		return;
+	}
+
+	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
+			  (ret) ? "LIT" : "BIG");
+	state = to_rockchip_crtc_state(encoder->crtc->state);
+	if (ret) {
+		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
+		state->output_mode = ROCKCHIP_OUT_MODE_P888;
+	} else {
+		val = DP_SEL_VOP_LIT << 16;
+		state->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+	}
+
+	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
+	if (ret)
+		return;
+
+	mutex_lock(&dp->lock);
+
+	ret = cdn_dp_enable(dp);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
+			      ret);
+		goto out;
+	}
+	if (!cdn_dp_check_link_status(dp)) {
+		ret = cdn_dp_train_link(dp);
+		if (ret) {
+			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
+			goto out;
+		}
+	}
+
+	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
+		goto out;
+	}
+
+	ret = cdn_dp_config_video(dp);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
+		goto out;
+	}
+
+	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&dp->lock);
+}
+
+static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
+{
+	struct cdn_dp_device *dp = encoder_to_dp(encoder);
+	int ret;
+
+	mutex_lock(&dp->lock);
+	if (dp->active) {
+		ret = cdn_dp_disable(dp);
+		if (ret) {
+			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
+				      ret);
+		}
+	}
+	mutex_unlock(&dp->lock);
+
+	/*
+	 * In the following two cases we need to run the event_work to
+	 * re-enable the DP:
+	 * 1. If more than one port has a device connected and one of
+	 *    them is removed, the DP is disabled here; run the
+	 *    event_work to re-open the DP for the remaining port.
+	 * 2. If re-training or re-config failed, the DP is disabled
+	 *    here; run the event_work to re-connect it.
+	 */
+	if (!dp->connected && cdn_dp_connected_port(dp))
+		schedule_work(&dp->event_work);
+}
+
+static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
+				       struct drm_crtc_state *crtc_state,
+				       struct drm_connector_state *conn_state)
+{
+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;
+
+	return 0;
+}
+
+static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
+	.mode_set = cdn_dp_encoder_mode_set,
+	.enable = cdn_dp_encoder_enable,
+	.disable = cdn_dp_encoder_disable,
+	.atomic_check = cdn_dp_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
+{
+	struct device *dev = dp->dev;
+	struct device_node *np = dev->of_node;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource *res;
+
+	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+	if (IS_ERR(dp->grf)) {
+		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
+		return PTR_ERR(dp->grf);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dp->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dp->regs)) {
+		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
+		return PTR_ERR(dp->regs);
+	}
+
+	dp->core_clk = devm_clk_get(dev, "core-clk");
+	if (IS_ERR(dp->core_clk)) {
+		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
+		return PTR_ERR(dp->core_clk);
+	}
+
+	dp->pclk = devm_clk_get(dev, "pclk");
+	if (IS_ERR(dp->pclk)) {
+		DRM_DEV_ERROR(dev, "cannot get pclk\n");
+		return PTR_ERR(dp->pclk);
+	}
+
+	dp->spdif_clk = devm_clk_get(dev, "spdif");
+	if (IS_ERR(dp->spdif_clk)) {
+		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
+		return PTR_ERR(dp->spdif_clk);
+	}
+
+	dp->grf_clk = devm_clk_get(dev, "grf");
+	if (IS_ERR(dp->grf_clk)) {
+		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
+		return PTR_ERR(dp->grf_clk);
+	}
+
+	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
+	if (IS_ERR(dp->spdif_rst)) {
+		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
+		return PTR_ERR(dp->spdif_rst);
+	}
+
+	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
+	if (IS_ERR(dp->dptx_rst)) {
+		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
+		return PTR_ERR(dp->dptx_rst);
+	}
+
+	dp->core_rst = devm_reset_control_get(dev, "core");
+	if (IS_ERR(dp->core_rst)) {
+		DRM_DEV_ERROR(dev, "no core reset control found\n");
+		return PTR_ERR(dp->core_rst);
+	}
+
+	dp->apb_rst = devm_reset_control_get(dev, "apb");
+	if (IS_ERR(dp->apb_rst)) {
+		DRM_DEV_ERROR(dev, "no apb reset control found\n");
+		return PTR_ERR(dp->apb_rst);
+	}
+
+	return 0;
+}
+
+static int cdn_dp_audio_hw_params(struct device *dev, void *data,
+				  struct hdmi_codec_daifmt *daifmt,
+				  struct hdmi_codec_params *params)
+{
+	struct cdn_dp_device *dp = dev_get_drvdata(dev);
+	struct audio_info audio = {
+		.sample_width = params->sample_width,
+		.sample_rate = params->sample_rate,
+		.channels = params->channels,
+	};
+	int ret;
+
+	mutex_lock(&dp->lock);
+	if (!dp->active) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	switch (daifmt->fmt) {
+	case HDMI_I2S:
+		audio.format = AFMT_I2S;
+		break;
+	case HDMI_SPDIF:
+		audio.format = AFMT_SPDIF;
+		break;
+	default:
+		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = cdn_dp_audio_config(dp, &audio);
+	if (!ret)
+		dp->audio_info = audio;
+
+out:
+	mutex_unlock(&dp->lock);
+	return ret;
+}
+
+static void cdn_dp_audio_shutdown(struct device *dev, void *data)
+{
+	struct cdn_dp_device *dp = dev_get_drvdata(dev);
+	int ret;
+
+	mutex_lock(&dp->lock);
+	if (!dp->active)
+		goto out;
+
+	ret = cdn_dp_audio_stop(dp, &dp->audio_info);
+	if (!ret)
+		dp->audio_info.format = AFMT_UNUSED;
+out:
+	mutex_unlock(&dp->lock);
+}
+
+static int cdn_dp_audio_digital_mute(struct device *dev, void *data,
+				     bool enable)
+{
+	struct cdn_dp_device *dp = dev_get_drvdata(dev);
+	int ret;
+
+	mutex_lock(&dp->lock);
+	if (!dp->active) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = cdn_dp_audio_mute(dp, enable);
+
+out:
+	mutex_unlock(&dp->lock);
+	return ret;
+}
+
+static int cdn_dp_audio_get_eld(struct device *dev, void *data,
+				u8 *buf, size_t len)
+{
+	struct cdn_dp_device *dp = dev_get_drvdata(dev);
+
+	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));
+
+	return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+	.hw_params = cdn_dp_audio_hw_params,
+	.audio_shutdown = cdn_dp_audio_shutdown,
+	.digital_mute = cdn_dp_audio_digital_mute,
+	.get_eld = cdn_dp_audio_get_eld,
+};
+
+static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
+				   struct device *dev)
+{
+	struct hdmi_codec_pdata codec_data = {
+		.i2s = 1,
+		.spdif = 1,
+		.ops = &audio_codec_ops,
+		.max_i2s_channels = 8,
+	};
+
+	dp->audio_pdev = platform_device_register_data(
+			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+			 &codec_data, sizeof(codec_data));
+
+	return PTR_ERR_OR_ZERO(dp->audio_pdev);
+}
+
+static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
+{
+	int ret;
+	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
+	unsigned long sleep = 1000;
+
+	WARN_ON(!mutex_is_locked(&dp->lock));
+
+	if (dp->fw_loaded)
+		return 0;
+
+	/* Drop the lock before getting the firmware to avoid blocking boot */
+	mutex_unlock(&dp->lock);
+
+	while (time_before(jiffies, timeout)) {
+		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
+		if (ret == -ENOENT) {
+			msleep(sleep);
+			sleep *= 2;
+			continue;
+		} else if (ret) {
+			DRM_DEV_ERROR(dp->dev,
+				      "failed to request firmware: %d\n", ret);
+			goto out;
+		}
+
+		dp->fw_loaded = true;
+		ret = 0;
+		goto out;
+	}
+
+	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
+	ret = -ETIMEDOUT;
+out:
+	mutex_lock(&dp->lock);
+	return ret;
+}
+
+static void cdn_dp_pd_event_work(struct work_struct *work)
+{
+	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
+						event_work);
+	struct drm_connector *connector = &dp->connector;
+	enum drm_connector_status old_status;
+
+	int ret;
+
+	mutex_lock(&dp->lock);
+
+	if (dp->suspended)
+		goto out;
+
+	ret = cdn_dp_request_firmware(dp);
+	if (ret)
+		goto out;
+
+	dp->connected = true;
+
+	/* Not connected, notify userspace to disable the block */
+	if (!cdn_dp_connected_port(dp)) {
+		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
+		dp->connected = false;
+
+	/* Connected but not enabled, enable the block */
+	} else if (!dp->active) {
+		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
+		ret = cdn_dp_enable(dp);
+		if (ret) {
+			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
+			dp->connected = false;
+		}
+
+	/* Enabled and connected to a dongle without a sink, notify userspace */
+	} else if (!cdn_dp_check_sink_connection(dp)) {
+		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
+		dp->connected = false;
+
+	/* Enabled and connected with a sink, re-train if requested */
+	} else if (!cdn_dp_check_link_status(dp)) {
+		unsigned int rate = dp->link.rate;
+		unsigned int lanes = dp->link.num_lanes;
+		struct drm_display_mode *mode = &dp->mode;
+
+		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
+		ret = cdn_dp_train_link(dp);
+		if (ret) {
+			dp->connected = false;
+			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
+			goto out;
+		}
+
+		/* If training result is changed, update the video config */
+		if (mode->clock &&
+		    (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
+			ret = cdn_dp_config_video(dp);
+			if (ret) {
+				dp->connected = false;
+				DRM_DEV_ERROR(dp->dev,
+					      "Failed to config video %d\n",
+					      ret);
+			}
+		}
+	}
+
+out:
+	mutex_unlock(&dp->lock);
+
+	old_status = connector->status;
+	connector->status = connector->funcs->detect(connector, false);
+	if (old_status != connector->status)
+		drm_kms_helper_hotplug_event(dp->drm_dev);
+}
+
+static int cdn_dp_pd_event(struct notifier_block *nb,
+			   unsigned long event, void *priv)
+{
+	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
+						event_nb);
+	struct cdn_dp_device *dp = port->dp;
+
+	/*
+	 * It would be nice to be able to just do the work inline right here.
+	 * However, we need to make a bunch of calls that might sleep in order
+	 * to turn on the block/phy, so use a worker instead.
+	 */
+	schedule_work(&dp->event_work);
+
+	return NOTIFY_DONE;
+}
+
+static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
+{
+	struct cdn_dp_device *dp = dev_get_drvdata(dev);
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct cdn_dp_port *port;
+	struct drm_device *drm_dev = data;
+	int ret, i;
+
+	ret = cdn_dp_parse_dt(dp);
+	if (ret < 0)
+		return ret;
+
+	dp->drm_dev = drm_dev;
+	dp->connected = false;
+	dp->active = false;
+	dp->active_port = -1;
+
+	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);
+
+	encoder = &dp->encoder;
+
+	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
+							     dev->of_node);
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
+			       DRM_MODE_ENCODER_TMDS, NULL);
+	if (ret) {
+		DRM_ERROR("failed to initialize encoder with drm\n");
+		return ret;
+	}
+
+	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);
+
+	connector = &dp->connector;
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+	connector->dpms = DRM_MODE_DPMS_OFF;
+
+	ret = drm_connector_init(drm_dev, connector,
+				 &cdn_dp_atomic_connector_funcs,
+				 DRM_MODE_CONNECTOR_DisplayPort);
+	if (ret) {
+		DRM_ERROR("failed to initialize connector with drm\n");
+		goto err_free_encoder;
+	}
+
+	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);
+
+	ret = drm_mode_connector_attach_encoder(connector, encoder);
+	if (ret) {
+		DRM_ERROR("failed to attach connector and encoder\n");
+		goto err_free_connector;
+	}
+
+	cdn_dp_audio_codec_init(dp, dev);
+
+	for (i = 0; i < dp->ports; i++) {
+		port = dp->port[i];
+
+		port->event_nb.notifier_call = cdn_dp_pd_event;
+		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
+						    EXTCON_DISP_DP,
+						    &port->event_nb);
+		if (ret) {
+			DRM_DEV_ERROR(dev,
+				      "register EXTCON_DISP_DP notifier err\n");
+			goto err_free_connector;
+		}
+	}
+
+	pm_runtime_enable(dev);
+
+	schedule_work(&dp->event_work);
+
+	return 0;
+
+err_free_connector:
+	drm_connector_cleanup(connector);
+err_free_encoder:
+	drm_encoder_cleanup(encoder);
+	return ret;
+}
+
+static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
+{
+	struct cdn_dp_device *dp = dev_get_drvdata(dev);
+	struct drm_encoder *encoder = &dp->encoder;
+	struct drm_connector *connector = &dp->connector;
+
+	cancel_work_sync(&dp->event_work);
+	platform_device_unregister(dp->audio_pdev);
+	cdn_dp_encoder_disable(encoder);
+	encoder->funcs->destroy(encoder);
+	connector->funcs->destroy(connector);
+
+	pm_runtime_disable(dev);
+	release_firmware(dp->fw);
+	kfree(dp->edid);
+	dp->edid = NULL;
+}
+
+static const struct component_ops cdn_dp_component_ops = {
+	.bind = cdn_dp_bind,
+	.unbind = cdn_dp_unbind,
+};
+
+int cdn_dp_suspend(struct device *dev)
+{
+	struct cdn_dp_device *dp = dev_get_drvdata(dev);
+	int ret = 0;
+
+	mutex_lock(&dp->lock);
+	if (dp->active)
+		ret = cdn_dp_disable(dp);
+	dp->suspended = true;
+	mutex_unlock(&dp->lock);
+
+	return ret;
+}
+
+int cdn_dp_resume(struct device *dev)
+{
+	struct cdn_dp_device *dp = dev_get_drvdata(dev);
+
+	mutex_lock(&dp->lock);
+	dp->suspended = false;
+	if (dp->fw_loaded)
+		schedule_work(&dp->event_work);
+	mutex_unlock(&dp->lock);
+
+	return 0;
+}
+
+static int cdn_dp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *match;
+	struct cdn_dp_data *dp_data;
+	struct cdn_dp_port *port;
+	struct cdn_dp_device *dp;
+	struct extcon_dev *extcon;
+	struct phy *phy;
+	int i;
+
+	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+	if (!dp)
+		return -ENOMEM;
+	dp->dev = dev;
+
+	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
+	dp_data = (struct cdn_dp_data *)match->data;
+
+	for (i = 0; i < dp_data->max_phy; i++) {
+		extcon = extcon_get_edev_by_phandle(dev, i);
+		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);
+
+		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
+		    PTR_ERR(phy) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		if (IS_ERR(extcon) || IS_ERR(phy))
+			continue;
+
+		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+		if (!port)
+			return -ENOMEM;
+
+		port->extcon = extcon;
+		port->phy = phy;
+		port->dp = dp;
+		port->id = i;
+		dp->port[dp->ports++] = port;
+	}
+
+	if (!dp->ports) {
+		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
+		return -EINVAL;
+	}
+
+	mutex_init(&dp->lock);
+	dev_set_drvdata(dev, dp);
+
+	return component_add(dev, &cdn_dp_component_ops);
+}
+
+static int cdn_dp_remove(struct platform_device *pdev)
+{
+	struct cdn_dp_device *dp = platform_get_drvdata(pdev);
+
+	cdn_dp_suspend(dp->dev);
+	component_del(&pdev->dev, &cdn_dp_component_ops);
+
+	return 0;
+}
+
+static void cdn_dp_shutdown(struct platform_device *pdev)
+{
+	struct cdn_dp_device *dp = platform_get_drvdata(pdev);
+
+	cdn_dp_suspend(dp->dev);
+}
+
+static const struct dev_pm_ops cdn_dp_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
+				cdn_dp_resume)
+};
+
+static struct platform_driver cdn_dp_driver = {
+	.probe = cdn_dp_probe,
+	.remove = cdn_dp_remove,
+	.shutdown = cdn_dp_shutdown,
+	.driver = {
+		   .name = "cdn-dp",
+		   .owner = THIS_MODULE,
+		   .of_match_table = of_match_ptr(cdn_dp_dt_ids),
+		   .pm = &cdn_dp_pm_ops,
+	},
+};
+
+module_platform_driver(cdn_dp_driver);
+
+MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
+MODULE_DESCRIPTION("cdn DP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
new file mode 100644
index 000000000000..f57e296401b8
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2016 Chris Zhong <zyw@rock-chips.com>
+ * Copyright (C) 2016 ROCKCHIP, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CDN_DP_CORE_H
+#define _CDN_DP_CORE_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_panel.h>
+#include "rockchip_drm_drv.h"
+
+#define MAX_PHY		2
+
+enum audio_format {
+	AFMT_I2S = 0,
+	AFMT_SPDIF = 1,
+	AFMT_UNUSED,
+};
+
+struct audio_info {
+	enum audio_format format;
+	int sample_rate;
+	int channels;
+	int sample_width;
+};
+
+enum vic_pxl_encoding_format {
+	PXL_RGB = 0x1,
+	YCBCR_4_4_4 = 0x2,
+	YCBCR_4_2_2 = 0x4,
+	YCBCR_4_2_0 = 0x8,
+	Y_ONLY = 0x10,
+};
+
+struct video_info {
+	bool h_sync_polarity;
+	bool v_sync_polarity;
+	bool interlaced;
+	int color_depth;
+	enum vic_pxl_encoding_format color_fmt;
+};
+
+struct cdn_firmware_header {
+	u32 size_bytes; /* size of the entire header+image(s) in bytes */
+	u32 header_size; /* size of just the header in bytes */
+	u32 iram_size; /* size of iram */
+	u32 dram_size; /* size of dram */
+};
+
+struct cdn_dp_port {
+	struct cdn_dp_device *dp;
+	struct notifier_block event_nb;
+	struct extcon_dev *extcon;
+	struct phy *phy;
+	u8 lanes;
+	bool phy_enabled;
+	u8 id;
+};
+
+struct cdn_dp_device {
+	struct device *dev;
+	struct drm_device *drm_dev;
+	struct drm_connector connector;
+	struct drm_encoder encoder;
+	struct drm_display_mode mode;
+	struct platform_device *audio_pdev;
+	struct work_struct event_work;
+	struct edid *edid;
+
+	struct mutex lock;
+	bool connected;
+	bool active;
+	bool suspended;
+
+	const struct firmware *fw;	/* cdn dp firmware */
+	unsigned int fw_version;	/* cdn fw version */
+	bool fw_loaded;
+
+	void __iomem *regs;
+	struct regmap *grf;
+	struct clk *core_clk;
+	struct clk *pclk;
+	struct clk *spdif_clk;
+	struct clk *grf_clk;
+	struct reset_control *spdif_rst;
+	struct reset_control *dptx_rst;
+	struct reset_control *apb_rst;
+	struct reset_control *core_rst;
+	struct audio_info audio_info;
+	struct video_info video_info;
+	struct drm_dp_link link;
+	struct cdn_dp_port *port[MAX_PHY];
+	u8 ports;
+	u8 lanes;
+	int active_port;
+
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+	bool sink_has_audio;
+};
+#endif  /* _CDN_DP_CORE_H */
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
new file mode 100644
index 000000000000..319dbbaa3609
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -0,0 +1,979 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Chris Zhong <zyw@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#include "cdn-dp-core.h"
+#include "cdn-dp-reg.h"
+
+#define CDN_DP_SPDIF_CLK		200000000
+#define FW_ALIVE_TIMEOUT_US		1000000
+#define MAILBOX_RETRY_US		1000
+#define MAILBOX_TIMEOUT_US		5000000
+#define LINK_TRAINING_RETRY_MS		20
+#define LINK_TRAINING_TIMEOUT_MS	500
+
+void cdn_dp_set_fw_clk(struct cdn_dp_device *dp, u32 clk)
+{
+	writel(clk / 1000000, dp->regs + SW_CLK_H);
+}
+
+void cdn_dp_clock_reset(struct cdn_dp_device *dp)
+{
+	u32 val;
+
+	val = DPTX_FRMR_DATA_CLK_RSTN_EN |
+	      DPTX_FRMR_DATA_CLK_EN |
+	      DPTX_PHY_DATA_RSTN_EN |
+	      DPTX_PHY_DATA_CLK_EN |
+	      DPTX_PHY_CHAR_RSTN_EN |
+	      DPTX_PHY_CHAR_CLK_EN |
+	      SOURCE_AUX_SYS_CLK_RSTN_EN |
+	      SOURCE_AUX_SYS_CLK_EN |
+	      DPTX_SYS_CLK_RSTN_EN |
+	      DPTX_SYS_CLK_EN |
+	      CFG_DPTX_VIF_CLK_RSTN_EN |
+	      CFG_DPTX_VIF_CLK_EN;
+	writel(val, dp->regs + SOURCE_DPTX_CAR);
+
+	val = SOURCE_PHY_RSTN_EN | SOURCE_PHY_CLK_EN;
+	writel(val, dp->regs + SOURCE_PHY_CAR);
+
+	val = SOURCE_PKT_SYS_RSTN_EN |
+	      SOURCE_PKT_SYS_CLK_EN |
+	      SOURCE_PKT_DATA_RSTN_EN |
+	      SOURCE_PKT_DATA_CLK_EN;
+	writel(val, dp->regs + SOURCE_PKT_CAR);
+
+	val = SPDIF_CDR_CLK_RSTN_EN |
+	      SPDIF_CDR_CLK_EN |
+	      SOURCE_AIF_SYS_RSTN_EN |
+	      SOURCE_AIF_SYS_CLK_EN |
+	      SOURCE_AIF_CLK_RSTN_EN |
+	      SOURCE_AIF_CLK_EN;
+	writel(val, dp->regs + SOURCE_AIF_CAR);
+
+	val = SOURCE_CIPHER_SYSTEM_CLK_RSTN_EN |
+	      SOURCE_CIPHER_SYS_CLK_EN |
+	      SOURCE_CIPHER_CHAR_CLK_RSTN_EN |
+	      SOURCE_CIPHER_CHAR_CLK_EN;
+	writel(val, dp->regs + SOURCE_CIPHER_CAR);
+
+	val = SOURCE_CRYPTO_SYS_CLK_RSTN_EN |
+	      SOURCE_CRYPTO_SYS_CLK_EN;
+	writel(val, dp->regs + SOURCE_CRYPTO_CAR);
+
+	/* enable Mailbox and PIF interrupt */
+	writel(0, dp->regs + APB_INT_MASK);
+}
+
+static int cdn_dp_mailbox_read(struct cdn_dp_device *dp)
+{
+	int val, ret;
+
+	ret = readx_poll_timeout(readl, dp->regs + MAILBOX_EMPTY_ADDR,
+				 val, !val, MAILBOX_RETRY_US,
+				 MAILBOX_TIMEOUT_US);
+	if (ret < 0)
+		return ret;
+
+	return readl(dp->regs + MAILBOX0_RD_DATA) & 0xff;
+}
+
+static int cdn_dp_mailbox_write(struct cdn_dp_device *dp, u8 val)
+{
+	int ret, full;
+
+	ret = readx_poll_timeout(readl, dp->regs + MAILBOX_FULL_ADDR,
+				 full, !full, MAILBOX_RETRY_US,
+				 MAILBOX_TIMEOUT_US);
+	if (ret < 0)
+		return ret;
+
+	writel(val, dp->regs + MAILBOX0_WR_DATA);
+
+	return 0;
+}
+
+static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
+					   u8 module_id, u8 opcode,
+					   u8 req_size)
+{
+	u32 mbox_size, i;
+	u8 header[4];
+	int ret;
+
+	/* read the header of the message */
+	for (i = 0; i < 4; i++) {
+		ret = cdn_dp_mailbox_read(dp);
+		if (ret < 0)
+			return ret;
+
+		header[i] = ret;
+	}
+
+	mbox_size = (header[2] << 8) | header[3];
+
+	if (opcode != header[0] || module_id != header[1] ||
+	    req_size != mbox_size) {
+		/*
+		 * If the message in mailbox is not what we want, we need to
+		 * clear the mailbox by reading its contents.
+		 */
+		for (i = 0; i < mbox_size; i++)
+			if (cdn_dp_mailbox_read(dp) < 0)
+				break;
+
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
+				       u8 *buff, u8 buff_size)
+{
+	u32 i;
+	int ret;
+
+	for (i = 0; i < buff_size; i++) {
+		ret = cdn_dp_mailbox_read(dp);
+		if (ret < 0)
+			return ret;
+
+		buff[i] = ret;
+	}
+
+	return 0;
+}
+
+static int cdn_dp_mailbox_send(struct cdn_dp_device *dp, u8 module_id,
+			       u8 opcode, u16 size, u8 *message)
+{
+	u8 header[4];
+	int ret, i;
+
+	header[0] = opcode;
+	header[1] = module_id;
+	header[2] = (size >> 8) & 0xff;
+	header[3] = size & 0xff;
+
+	for (i = 0; i < 4; i++) {
+		ret = cdn_dp_mailbox_write(dp, header[i]);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < size; i++) {
+		ret = cdn_dp_mailbox_write(dp, message[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
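Every mailbox transaction is framed the same way: a 4-byte header carrying the opcode, the module id and a big-endian 16-bit payload length, followed by the payload bytes, each pushed through MAILBOX0_WR_DATA. A buffer-based sketch of the framing with no MMIO (it mirrors cdn_dp_mailbox_send() above; the helper itself is not part of the patch):

#include <stdint.h>
#include <string.h>

/* Frame a mailbox message into out[]: 4-byte header plus payload.
 * Returns the total frame length; out[] must hold at least size + 4. */
static size_t mbox_frame(uint8_t *out, uint8_t module_id, uint8_t opcode,
			 const uint8_t *msg, uint16_t size)
{
	out[0] = opcode;
	out[1] = module_id;
	out[2] = (size >> 8) & 0xff;	/* payload length, big-endian */
	out[3] = size & 0xff;
	if (size)
		memcpy(&out[4], msg, size);
	return 4 + (size_t)size;
}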
+
+static int cdn_dp_reg_write(struct cdn_dp_device *dp, u16 addr, u32 val)
+{
+	u8 msg[6];
+
+	msg[0] = (addr >> 8) & 0xff;
+	msg[1] = addr & 0xff;
+	msg[2] = (val >> 24) & 0xff;
+	msg[3] = (val >> 16) & 0xff;
+	msg[4] = (val >> 8) & 0xff;
+	msg[5] = val & 0xff;
+	return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_REGISTER,
+				   sizeof(msg), msg);
+}
+
+static int cdn_dp_reg_write_bit(struct cdn_dp_device *dp, u16 addr,
+				u8 start_bit, u8 bits_no, u32 val)
+{
+	u8 field[8];
+
+	field[0] = (addr >> 8) & 0xff;
+	field[1] = addr & 0xff;
+	field[2] = start_bit;
+	field[3] = bits_no;
+	field[4] = (val >> 24) & 0xff;
+	field[5] = (val >> 16) & 0xff;
+	field[6] = (val >> 8) & 0xff;
+	field[7] = val & 0xff;
+
+	return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_FIELD,
+				   sizeof(field), field);
+}
+
+int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len)
+{
+	u8 msg[5], reg[5];
+	int ret;
+
+	msg[0] = (len >> 8) & 0xff;
+	msg[1] = len & 0xff;
+	msg[2] = (addr >> 16) & 0xff;
+	msg[3] = (addr >> 8) & 0xff;
+	msg[4] = addr & 0xff;
+	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_DPCD,
+				  sizeof(msg), msg);
+	if (ret)
+		goto err_dpcd_read;
+
+	ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+					      DPTX_READ_DPCD,
+					      sizeof(reg) + len);
+	if (ret)
+		goto err_dpcd_read;
+
+	ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
+	if (ret)
+		goto err_dpcd_read;
+
+	ret = cdn_dp_mailbox_read_receive(dp, data, len);
+
+err_dpcd_read:
+	return ret;
+}
+
+int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value)
+{
+	u8 msg[6], reg[5];
+	int ret;
+
+	msg[0] = 0;
+	msg[1] = 1;
+	msg[2] = (addr >> 16) & 0xff;
+	msg[3] = (addr >> 8) & 0xff;
+	msg[4] = addr & 0xff;
+	msg[5] = value;
+	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_DPCD,
+				  sizeof(msg), msg);
+	if (ret)
+		goto err_dpcd_write;
+
+	ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+					      DPTX_WRITE_DPCD, sizeof(reg));
+	if (ret)
+		goto err_dpcd_write;
+
+	ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
+	if (ret)
+		goto err_dpcd_write;
+
+	if (addr != (reg[2] << 16 | reg[3] << 8 | reg[4]))
+		ret = -EINVAL;
+
+err_dpcd_write:
+	if (ret)
+		DRM_DEV_ERROR(dp->dev, "dpcd write failed: %d\n", ret);
+	return ret;
+}
+
+int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem,
+			 u32 i_size, const u32 *d_mem, u32 d_size)
+{
+	u32 reg;
+	int i, ret;
+
+	/* reset ucpu before loading firmware */
+	writel(APB_IRAM_PATH | APB_DRAM_PATH | APB_XT_RESET,
+	       dp->regs + APB_CTRL);
+
+	for (i = 0; i < i_size; i += 4)
+		writel(*i_mem++, dp->regs + ADDR_IMEM + i);
+
+	for (i = 0; i < d_size; i += 4)
+		writel(*d_mem++, dp->regs + ADDR_DMEM + i);
+
+	/* un-reset ucpu */
+	writel(0, dp->regs + APB_CTRL);
+
+	/* check the keep-alive register to make sure the fw is running */
+	ret = readx_poll_timeout(readl, dp->regs + KEEP_ALIVE,
+				 reg, reg, 2000, FW_ALIVE_TIMEOUT_US);
+	if (ret < 0) {
+		DRM_DEV_ERROR(dp->dev, "failed to loaded the FW reg = %x\n",
+			      reg);
+		return -EINVAL;
+	}
+
+	reg = readl(dp->regs + VER_L) & 0xff;
+	dp->fw_version = reg;
+	reg = readl(dp->regs + VER_H) & 0xff;
+	dp->fw_version |= reg << 8;
+	reg = readl(dp->regs + VER_LIB_L_ADDR) & 0xff;
+	dp->fw_version |= reg << 16;
+	reg = readl(dp->regs + VER_LIB_H_ADDR) & 0xff;
+	dp->fw_version |= reg << 24;
+
+	dev_dbg(dp->dev, "firmware version: %x\n", dp->fw_version);
+
+	return 0;
+}
+
+int cdn_dp_set_firmware_active(struct cdn_dp_device *dp, bool enable)
+{
+	u8 msg[5];
+	int ret, i;
+
+	msg[0] = GENERAL_MAIN_CONTROL;
+	msg[1] = MB_MODULE_ID_GENERAL;
+	msg[2] = 0;
+	msg[3] = 1;
+	msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
+
+	for (i = 0; i < sizeof(msg); i++) {
+		ret = cdn_dp_mailbox_write(dp, msg[i]);
+		if (ret)
+			goto err_set_firmware_active;
+	}
+
+	/* read the firmware state */
+	for (i = 0; i < sizeof(msg); i++)  {
+		ret = cdn_dp_mailbox_read(dp);
+		if (ret < 0)
+			goto err_set_firmware_active;
+
+		msg[i] = ret;
+	}
+
+	ret = 0;
+
+err_set_firmware_active:
+	if (ret < 0)
+		DRM_DEV_ERROR(dp->dev, "set firmware active failed\n");
+	return ret;
+}
+
+int cdn_dp_set_host_cap(struct cdn_dp_device *dp, u8 lanes, bool flip)
+{
+	u8 msg[8];
+	int ret;
+
+	msg[0] = CDN_DP_MAX_LINK_RATE;
+	msg[1] = lanes | SCRAMBLER_EN;
+	msg[2] = VOLTAGE_LEVEL_2;
+	msg[3] = PRE_EMPHASIS_LEVEL_3;
+	msg[4] = PTS1 | PTS2 | PTS3 | PTS4;
+	msg[5] = FAST_LT_NOT_SUPPORT;
+	msg[6] = flip ? LANE_MAPPING_FLIPPED : LANE_MAPPING_NORMAL;
+	msg[7] = ENHANCED;
+
+	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX,
+				  DPTX_SET_HOST_CAPABILITIES,
+				  sizeof(msg), msg);
+	if (ret)
+		goto err_set_host_cap;
+
+	ret = cdn_dp_reg_write(dp, DP_AUX_SWAP_INVERSION_CONTROL,
+			       AUX_HOST_INVERT);
+
+err_set_host_cap:
+	if (ret)
+		DRM_DEV_ERROR(dp->dev, "set host cap failed: %d\n", ret);
+	return ret;
+}
+
+int cdn_dp_event_config(struct cdn_dp_device *dp)
+{
+	u8 msg[5];
+	int ret;
+
+	memset(msg, 0, sizeof(msg));
+
+	msg[0] = DPTX_EVENT_ENABLE_HPD | DPTX_EVENT_ENABLE_TRAINING;
+
+	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_ENABLE_EVENT,
+				  sizeof(msg), msg);
+	if (ret)
+		DRM_DEV_ERROR(dp->dev, "set event config failed: %d\n", ret);
+
+	return ret;
+}
+
+u32 cdn_dp_get_event(struct cdn_dp_device *dp)
+{
+	return readl(dp->regs + SW_EVENTS0);
+}
+
+int cdn_dp_get_hpd_status(struct cdn_dp_device *dp)
+{
+	u8 status;
+	int ret;
+
+	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_HPD_STATE,
+				  0, NULL);
+	if (ret)
+		goto err_get_hpd;
+
+	ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+					      DPTX_HPD_STATE, sizeof(status));
+	if (ret)
+		goto err_get_hpd;
+
+	ret = cdn_dp_mailbox_read_receive(dp, &status, sizeof(status));
+	if (ret)
+		goto err_get_hpd;
+
+	return status;
+
+err_get_hpd:
+	DRM_DEV_ERROR(dp->dev, "get hpd status failed: %d\n", ret);
+	return ret;
+}
+
+int cdn_dp_get_edid_block(void *data, u8 *edid,
+			  unsigned int block, size_t length)
+{
+	struct cdn_dp_device *dp = data;
+	u8 msg[2], reg[2], i;
+	int ret;
+
+	for (i = 0; i < 4; i++) {
+		msg[0] = block / 2;
+		msg[1] = block % 2;
+
+		ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_GET_EDID,
+					  sizeof(msg), msg);
+		if (ret)
+			continue;
+
+		ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+						      DPTX_GET_EDID,
+						      sizeof(reg) + length);
+		if (ret)
+			continue;
+
+		ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
+		if (ret)
+			continue;
+
+		ret = cdn_dp_mailbox_read_receive(dp, edid, length);
+		if (ret)
+			continue;
+
+		if (reg[0] == length && reg[1] == block / 2)
+			break;
+	}
+
+	if (ret)
+		DRM_DEV_ERROR(dp->dev, "get block[%d] edid failed: %d\n", block,
+			      ret);
+
+	return ret;
+}
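The request above addresses EDID the E-DDC way: each 256-byte segment holds two 128-byte blocks, so msg[0] carries the segment pointer (block / 2) and msg[1] the block within the segment (block % 2); the reply echoes the length and the segment, which the retry loop checks before accepting the data. A one-line restatement of the mapping (helper name hypothetical):

#include <stdint.h>

/* E-DDC addressing used by DPTX_GET_EDID: each 256-byte segment holds
 * two 128-byte EDID blocks. */
static void edid_block_to_segment(unsigned int block,
				  uint8_t *segment, uint8_t *offset)
{
	*segment = block / 2;	/* segment pointer (echoed in reg[1]) */
	*offset = block % 2;	/* block within the segment */
}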
+
+static int cdn_dp_training_start(struct cdn_dp_device *dp)
+{
+	unsigned long timeout;
+	u8 msg, event[2];
+	int ret;
+
+	msg = LINK_TRAINING_RUN;
+
+	/* start training */
+	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_TRAINING_CONTROL,
+				  sizeof(msg), &msg);
+	if (ret)
+		goto err_training_start;
+
+	timeout = jiffies + msecs_to_jiffies(LINK_TRAINING_TIMEOUT_MS);
+	while (time_before(jiffies, timeout)) {
+		msleep(LINK_TRAINING_RETRY_MS);
+		ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX,
+					  DPTX_READ_EVENT, 0, NULL);
+		if (ret)
+			goto err_training_start;
+
+		ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+						      DPTX_READ_EVENT,
+						      sizeof(event));
+		if (ret)
+			goto err_training_start;
+
+		ret = cdn_dp_mailbox_read_receive(dp, event, sizeof(event));
+		if (ret)
+			goto err_training_start;
+
+		if (event[1] & EQ_PHASE_FINISHED)
+			return 0;
+	}
+
+	ret = -ETIMEDOUT;
+
+err_training_start:
+	DRM_DEV_ERROR(dp->dev, "training failed: %d\n", ret);
+	return ret;
+}
+
+static int cdn_dp_get_training_status(struct cdn_dp_device *dp)
+{
+	u8 status[10];
+	int ret;
+
+	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_LINK_STAT,
+				  0, NULL);
+	if (ret)
+		goto err_get_training_status;
+
+	ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+					      DPTX_READ_LINK_STAT,
+					      sizeof(status));
+	if (ret)
+		goto err_get_training_status;
+
+	ret = cdn_dp_mailbox_read_receive(dp, status, sizeof(status));
+	if (ret)
+		goto err_get_training_status;
+
+	dp->link.rate = status[0];
+	dp->link.num_lanes = status[1];
+
+err_get_training_status:
+	if (ret)
+		DRM_DEV_ERROR(dp->dev, "get training status failed: %d\n", ret);
+	return ret;
+}
+
+int cdn_dp_train_link(struct cdn_dp_device *dp)
+{
+	int ret;
+
+	ret = cdn_dp_training_start(dp);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "Failed to start training %d\n", ret);
+		return ret;
+	}
+
+	ret = cdn_dp_get_training_status(dp);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "Failed to get training stat %d\n", ret);
+		return ret;
+	}
+
+	DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->link.rate,
+			  dp->link.num_lanes);
+	return ret;
+}
+
+int cdn_dp_set_video_status(struct cdn_dp_device *dp, int active)
+{
+	u8 msg;
+	int ret;
+
+	msg = !!active;
+
+	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_SET_VIDEO,
+				  sizeof(msg), &msg);
+	if (ret)
+		DRM_DEV_ERROR(dp->dev, "set video status failed: %d\n", ret);
+
+	return ret;
+}
+
+static int cdn_dp_get_msa_misc(struct video_info *video,
+			       struct drm_display_mode *mode)
+{
+	u32 msa_misc;
+	u8 val[2] = {0};
+
+	switch (video->color_fmt) {
+	case PXL_RGB:
+	case Y_ONLY:
+		val[0] = 0;
+		break;
+	/* set YUV default color space conversion to BT601 */
+	case YCBCR_4_4_4:
+		val[0] = 6 + BT_601 * 8;
+		break;
+	case YCBCR_4_2_2:
+		val[0] = 5 + BT_601 * 8;
+		break;
+	case YCBCR_4_2_0:
+		val[0] = 5;
+		break;
+	}
+
+	switch (video->color_depth) {
+	case 6:
+		val[1] = 0;
+		break;
+	case 8:
+		val[1] = 1;
+		break;
+	case 10:
+		val[1] = 2;
+		break;
+	case 12:
+		val[1] = 3;
+		break;
+	case 16:
+		val[1] = 4;
+		break;
+	}
+
+	msa_misc = 2 * val[0] + 32 * val[1] +
+		   ((video->color_fmt == Y_ONLY) ? (1 << 14) : 0);
+
+	return msa_misc;
+}
+
+int cdn_dp_config_video(struct cdn_dp_device *dp)
+{
+	struct video_info *video = &dp->video_info;
+	struct drm_display_mode *mode = &dp->mode;
+	u64 symbol;
+	u32 val, link_rate, rem;
+	u8 bit_per_pix, tu_size_reg = TU_SIZE;
+	int ret;
+
+	bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ?
+		      (video->color_depth * 2) : (video->color_depth * 3);
+
+	link_rate = drm_dp_bw_code_to_link_rate(dp->link.rate) / 1000;
+
+	ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE);
+	if (ret)
+		goto err_config_video;
+
+	ret = cdn_dp_reg_write(dp, HSYNC2VSYNC_POL_CTRL, 0);
+	if (ret)
+		goto err_config_video;
+
+	/*
+	 * Find the best tu_size and valid symbol count:
+	 * 1. choose the Lclk freq (162 MHz, 270 MHz or 540 MHz), set TU to 32
+	 * 2. calculate VS (valid symbols) = TU * Pclk * Bpp / (Lclk * Lanes)
+	 * 3. if the fractional part of VS is > .85 or < .1, or VS < 2, or
+	 *    TU < VS + 4, then set TU += 2 and repeat from step 2
+	 * (a standalone restatement follows this function).
+	 */
+	do {
+		tu_size_reg += 2;
+		symbol = tu_size_reg * mode->clock * bit_per_pix;
+		do_div(symbol, dp->link.num_lanes * link_rate * 8);
+		rem = do_div(symbol, 1000);
+		if (tu_size_reg > 64) {
+			ret = -EINVAL;
+			goto err_config_video;
+		}
+	} while ((symbol <= 1) || (tu_size_reg - symbol < 4) ||
+		 (rem > 850) || (rem < 100));
+
+	val = symbol + (tu_size_reg << 8);
+	val |= TU_CNT_RST_EN;
+	ret = cdn_dp_reg_write(dp, DP_FRAMER_TU, val);
+	if (ret)
+		goto err_config_video;
+
+	/* set the FIFO Buffer size */
+	val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate;
+	val /= (dp->link.num_lanes * link_rate);
+	val = div_u64(8 * (symbol + 1), bit_per_pix) - val;
+	val += 2;
+	ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val);
+
+	switch (video->color_depth) {
+	case 6:
+		val = BCS_6;
+		break;
+	case 8:
+		val = BCS_8;
+		break;
+	case 10:
+		val = BCS_10;
+		break;
+	case 12:
+		val = BCS_12;
+		break;
+	case 16:
+		val = BCS_16;
+		break;
+	}
+
+	val += video->color_fmt << 8;
+	ret = cdn_dp_reg_write(dp, DP_FRAMER_PXL_REPR, val);
+	if (ret)
+		goto err_config_video;
+
+	val = video->h_sync_polarity ? DP_FRAMER_SP_HSP : 0;
+	val |= video->v_sync_polarity ? DP_FRAMER_SP_VSP : 0;
+	ret = cdn_dp_reg_write(dp, DP_FRAMER_SP, val);
+	if (ret)
+		goto err_config_video;
+
+	val = (mode->hsync_start - mode->hdisplay) << 16;
+	val |= mode->htotal - mode->hsync_end;
+	ret = cdn_dp_reg_write(dp, DP_FRONT_BACK_PORCH, val);
+	if (ret)
+		goto err_config_video;
+
+	val = mode->hdisplay * bit_per_pix / 8;
+	ret = cdn_dp_reg_write(dp, DP_BYTE_COUNT, val);
+	if (ret)
+		goto err_config_video;
+
+	val = mode->htotal | ((mode->htotal - mode->hsync_start) << 16);
+	ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_0, val);
+	if (ret)
+		goto err_config_video;
+
+	val = mode->hsync_end - mode->hsync_start;
+	val |= (mode->hdisplay << 16) | (video->h_sync_polarity << 15);
+	ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_1, val);
+	if (ret)
+		goto err_config_video;
+
+	val = mode->vtotal;
+	val |= (mode->vtotal - mode->vsync_start) << 16;
+	ret = cdn_dp_reg_write(dp, MSA_VERTICAL_0, val);
+	if (ret)
+		goto err_config_video;
+
+	val = mode->vsync_end - mode->vsync_start;
+	val |= (mode->vdisplay << 16) | (video->v_sync_polarity << 15);
+	ret = cdn_dp_reg_write(dp, MSA_VERTICAL_1, val);
+	if (ret)
+		goto err_config_video;
+
+	val = cdn_dp_get_msa_misc(video, mode);
+	ret = cdn_dp_reg_write(dp, MSA_MISC, val);
+	if (ret)
+		goto err_config_video;
+
+	ret = cdn_dp_reg_write(dp, STREAM_CONFIG, 1);
+	if (ret)
+		goto err_config_video;
+
+	val = mode->hsync_end - mode->hsync_start;
+	val |= mode->hdisplay << 16;
+	ret = cdn_dp_reg_write(dp, DP_HORIZONTAL, val);
+	if (ret)
+		goto err_config_video;
+
+	val = mode->vdisplay;
+	val |= (mode->vtotal - mode->vsync_start) << 16;
+	ret = cdn_dp_reg_write(dp, DP_VERTICAL_0, val);
+	if (ret)
+		goto err_config_video;
+
+	val = mode->vtotal;
+	ret = cdn_dp_reg_write(dp, DP_VERTICAL_1, val);
+	if (ret)
+		goto err_config_video;
+
+	ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 2, 1, 0);
+
+err_config_video:
+	if (ret)
+		DRM_DEV_ERROR(dp->dev, "config video failed: %d\n", ret);
+	return ret;
+}
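The transfer-unit search in cdn_dp_config_video() can be read in isolation: it steps the TU size by 2 until the valid-symbol count VS = TU * Pclk * Bpp / (Lclk * Lanes) has an integer part of at least 2, a fractional part between .100 and .850, and at least 4 symbols of slack within the TU. A standalone version of the same fixed-point search with a worked example; the starting value 30 stands in for the TU_SIZE constant, which lives in cdn-dp-reg.h and is not shown in this hunk:

#include <stdint.h>
#include <stdio.h>

/* Standalone restatement of the TU search above: clock in kHz, link_rate
 * in MHz (the driver divides drm_dp_bw_code_to_link_rate() by 1000), bpp
 * in bits per pixel. Returns the TU size or -1; *vs receives the integer
 * valid-symbol count and *rem its fractional part in thousandths. */
static int find_tu(uint32_t clock_khz, uint32_t bpp, uint32_t link_rate_mhz,
		   uint32_t lanes, uint64_t *vs, uint32_t *rem)
{
	uint32_t tu = 30;

	do {
		tu += 2;
		if (tu > 64)
			return -1;
		*vs = (uint64_t)tu * clock_khz * bpp /
		      (lanes * link_rate_mhz * 8);	/* VS scaled by 1000 */
		*rem = *vs % 1000;
		*vs /= 1000;
	} while (*vs <= 1 || tu - *vs < 4 || *rem > 850 || *rem < 100);

	return tu;
}

int main(void)
{
	uint64_t vs;
	uint32_t rem;
	/* 1920x1080@60 (148500 kHz, 24 bpp) on 4 lanes of HBR2 (540 MHz):
	 * the first candidate, TU = 32, gives VS = 6.600 and is accepted. */
	int tu = find_tu(148500, 24, 540, 4, &vs, &rem);

	printf("tu=%d vs=%llu.%03u\n", tu, (unsigned long long)vs, rem);
	return 0;
}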
+
+int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
+{
+	u32 val;
+	int ret;
+
+	ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0);
+	if (ret) {
+		DRM_DEV_ERROR(dp->dev, "audio stop failed: %d\n", ret);
+		return ret;
+	}
+
+	val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
+	val |= SPDIF_FIFO_MID_RANGE(0xe0);
+	val |= SPDIF_JITTER_THRSH(0xe0);
+	val |= SPDIF_JITTER_AVG_WIN(7);
+	writel(val, dp->regs + SPDIF_CTRL_ADDR);
+
+	/* clear the audio config and reset */
+	writel(0, dp->regs + AUDIO_SRC_CNTL);
+	writel(0, dp->regs + AUDIO_SRC_CNFG);
+	writel(AUDIO_SW_RST, dp->regs + AUDIO_SRC_CNTL);
+	writel(0, dp->regs + AUDIO_SRC_CNTL);
+
+	/* reset the smpl2pckt component */
+	writel(0, dp->regs + SMPL2PKT_CNTL);
+	writel(AUDIO_SW_RST, dp->regs + SMPL2PKT_CNTL);
+	writel(0, dp->regs + SMPL2PKT_CNTL);
+
+	/* reset FIFO */
+	writel(AUDIO_SW_RST, dp->regs + FIFO_CNTL);
+	writel(0, dp->regs + FIFO_CNTL);
+
+	if (audio->format == AFMT_SPDIF)
+		clk_disable_unprepare(dp->spdif_clk);
+
+	return 0;
+}
+
+int cdn_dp_audio_mute(struct cdn_dp_device *dp, bool enable)
+{
+	int ret;
+
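+	/* VB-ID bit 4 is the DisplayPort AudioMute_Flag */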
+	ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 4, 1, enable);
+	if (ret)
+		DRM_DEV_ERROR(dp->dev, "audio mute failed: %d\n", ret);
+
+	return ret;
+}
+
+static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp,
+				    struct audio_info *audio)
+{
+	int sub_pckt_num = 1, i2s_port_en_val = 0xf, i;
+	u32 val;
+
+	if (audio->channels == 2) {
+		if (dp->link.num_lanes == 1)
+			sub_pckt_num = 2;
+		else
+			sub_pckt_num = 4;
+
+		i2s_port_en_val = 1;
+	} else if (audio->channels == 4) {
+		i2s_port_en_val = 3;
+	}
+
+	writel(0x0, dp->regs + SPDIF_CTRL_ADDR);
+
+	writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
+
+	val = MAX_NUM_CH(audio->channels);
+	val |= NUM_OF_I2S_PORTS(audio->channels);
+	val |= AUDIO_TYPE_LPCM;
+	val |= CFG_SUB_PCKT_NUM(sub_pckt_num);
+	writel(val, dp->regs + SMPL2PKT_CNFG);
+
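+	/*
+	 * Sample-width field of AUDIO_SRC_CNFG (bits 10:9): 0 selects
+	 * 16-bit, 1 selects 24-bit, 2 selects 32-bit samples.
+	 */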
+	if (audio->sample_width == 16)
+		val = 0;
+	else if (audio->sample_width == 24)
+		val = 1 << 9;
+	else
+		val = 2 << 9;
+
+	val |= AUDIO_CH_NUM(audio->channels);
+	val |= I2S_DEC_PORT_EN(i2s_port_en_val);
+	val |= TRANS_SMPL_WIDTH_32;
+	writel(val, dp->regs + AUDIO_SRC_CNFG);
+
+	for (i = 0; i < (audio->channels + 1) / 2; i++) {
+		if (audio->sample_width == 16)
+			val = (0x02 << 8) | (0x02 << 20);
+		else if (audio->sample_width == 24)
+			val = (0x0b << 8) | (0x0b << 20);
+
+		val |= ((2 * i) << 4) | ((2 * i + 1) << 16);
+		writel(val, dp->regs + STTS_BIT_CH(i));
+	}
+
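+	/*
+	 * The codes below match the IEC 60958 channel-status
+	 * sampling-frequency encoding; ORIGINAL_SAMP_FREQ carries the
+	 * bit-inverted SAMPLING_FREQ value, as the spec defines for the
+	 * original sampling frequency field.
+	 */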
+	switch (audio->sample_rate) {
+	case 32000:
+		val = SAMPLING_FREQ(3) |
+		      ORIGINAL_SAMP_FREQ(0xc);
+		break;
+	case 44100:
+		val = SAMPLING_FREQ(0) |
+		      ORIGINAL_SAMP_FREQ(0xf);
+		break;
+	case 48000:
+		val = SAMPLING_FREQ(2) |
+		      ORIGINAL_SAMP_FREQ(0xd);
+		break;
+	case 88200:
+		val = SAMPLING_FREQ(8) |
+		      ORIGINAL_SAMP_FREQ(0x7);
+		break;
+	case 96000:
+		val = SAMPLING_FREQ(0xa) |
+		      ORIGINAL_SAMP_FREQ(5);
+		break;
+	case 176400:
+		val = SAMPLING_FREQ(0xc) |
+		      ORIGINAL_SAMP_FREQ(3);
+		break;
+	case 192000:
+		val = SAMPLING_FREQ(0xe) |
+		      ORIGINAL_SAMP_FREQ(1);
+		break;
+	}
+	val |= 4;
+	writel(val, dp->regs + COM_CH_STTS_BITS);
+
+	writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
+	writel(I2S_DEC_START, dp->regs + AUDIO_SRC_CNTL);
+}
+
+static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
+{
+	u32 val;
+
+	val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
+	val |= SPDIF_FIFO_MID_RANGE(0xe0);
+	val |= SPDIF_JITTER_THRSH(0xe0);
+	val |= SPDIF_JITTER_AVG_WIN(7);
+	writel(val, dp->regs + SPDIF_CTRL_ADDR);
+
+	writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
+
+	val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
+	writel(val, dp->regs + SMPL2PKT_CNFG);
+	writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
+
+	val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
+	val |= SPDIF_FIFO_MID_RANGE(0xe0);
+	val |= SPDIF_JITTER_THRSH(0xe0);
+	val |= SPDIF_JITTER_AVG_WIN(7);
+	writel(val, dp->regs + SPDIF_CTRL_ADDR);
+
+	clk_prepare_enable(dp->spdif_clk);
+	clk_set_rate(dp->spdif_clk, CDN_DP_SPDIF_CLK);
+}
+
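+/*
+ * Configure the audio path.  A typical consumer (a sketch, not mandated
+ * by this file) calls cdn_dp_audio_config() from its hw_params hook,
+ * unmutes with cdn_dp_audio_mute(dp, false), and on shutdown mutes
+ * again and calls cdn_dp_audio_stop().
+ */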
+int cdn_dp_audio_config(struct cdn_dp_device *dp, struct audio_info *audio)
+{
+	int ret;
+
+	/* reset the spdif clk before config */
+	if (audio->format == AFMT_SPDIF) {
+		reset_control_assert(dp->spdif_rst);
+		reset_control_deassert(dp->spdif_rst);
+	}
+
+	ret = cdn_dp_reg_write(dp, CM_LANE_CTRL, LANE_REF_CYC);
+	if (ret)
+		goto err_audio_config;
+
+	ret = cdn_dp_reg_write(dp, CM_CTRL, 0);
+	if (ret)
+		goto err_audio_config;
+
+	if (audio->format == AFMT_I2S)
+		cdn_dp_audio_config_i2s(dp, audio);
+	else if (audio->format == AFMT_SPDIF)
+		cdn_dp_audio_config_spdif(dp);
+
+	ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, AUDIO_PACK_EN);
+
+err_audio_config:
+	if (ret)
+		DRM_DEV_ERROR(dp->dev, "audio config failed: %d\n", ret);
+	return ret;
+}
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.h b/drivers/gpu/drm/rockchip/cdn-dp-reg.h
new file mode 100644
index 000000000000..b5f215324694
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.h
@@ -0,0 +1,483 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Chris Zhong <zyw@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CDN_DP_REG_H
+#define _CDN_DP_REG_H
+
+#include <linux/bitops.h>
+
+#define ADDR_IMEM		0x10000
+#define ADDR_DMEM		0x20000
+
+/* APB CFG addr */
+#define APB_CTRL			0
+#define XT_INT_CTRL			0x04
+#define MAILBOX_FULL_ADDR		0x08
+#define MAILBOX_EMPTY_ADDR		0x0c
+#define MAILBOX0_WR_DATA		0x10
+#define MAILBOX0_RD_DATA		0x14
+#define KEEP_ALIVE			0x18
+#define VER_L				0x1c
+#define VER_H				0x20
+#define VER_LIB_L_ADDR			0x24
+#define VER_LIB_H_ADDR			0x28
+#define SW_DEBUG_L			0x2c
+#define SW_DEBUG_H			0x30
+#define MAILBOX_INT_MASK		0x34
+#define MAILBOX_INT_STATUS		0x38
+#define SW_CLK_L			0x3c
+#define SW_CLK_H			0x40
+#define SW_EVENTS0			0x44
+#define SW_EVENTS1			0x48
+#define SW_EVENTS2			0x4c
+#define SW_EVENTS3			0x50
+#define XT_OCD_CTRL			0x60
+#define APB_INT_MASK			0x6c
+#define APB_STATUS_MASK			0x70
+
+/* audio decoder addr */
+#define AUDIO_SRC_CNTL			0x30000
+#define AUDIO_SRC_CNFG			0x30004
+#define COM_CH_STTS_BITS		0x30008
+#define STTS_BIT_CH(x)			(0x3000c + ((x) << 2))
+#define SPDIF_CTRL_ADDR			0x3004c
+#define SPDIF_CH1_CS_3100_ADDR		0x30050
+#define SPDIF_CH1_CS_6332_ADDR		0x30054
+#define SPDIF_CH1_CS_9564_ADDR		0x30058
+#define SPDIF_CH1_CS_12796_ADDR		0x3005c
+#define SPDIF_CH1_CS_159128_ADDR	0x30060
+#define SPDIF_CH1_CS_191160_ADDR	0x30064
+#define SPDIF_CH2_CS_3100_ADDR		0x30068
+#define SPDIF_CH2_CS_6332_ADDR		0x3006c
+#define SPDIF_CH2_CS_9564_ADDR		0x30070
+#define SPDIF_CH2_CS_12796_ADDR		0x30074
+#define SPDIF_CH2_CS_159128_ADDR	0x30078
+#define SPDIF_CH2_CS_191160_ADDR	0x3007c
+#define SMPL2PKT_CNTL			0x30080
+#define SMPL2PKT_CNFG			0x30084
+#define FIFO_CNTL			0x30088
+#define FIFO_STTS			0x3008c
+
+/* source pif addr */
+#define SOURCE_PIF_WR_ADDR		0x30800
+#define SOURCE_PIF_WR_REQ		0x30804
+#define SOURCE_PIF_RD_ADDR		0x30808
+#define SOURCE_PIF_RD_REQ		0x3080c
+#define SOURCE_PIF_DATA_WR		0x30810
+#define SOURCE_PIF_DATA_RD		0x30814
+#define SOURCE_PIF_FIFO1_FLUSH		0x30818
+#define SOURCE_PIF_FIFO2_FLUSH		0x3081c
+#define SOURCE_PIF_STATUS		0x30820
+#define SOURCE_PIF_INTERRUPT_SOURCE	0x30824
+#define SOURCE_PIF_INTERRUPT_MASK	0x30828
+#define SOURCE_PIF_PKT_ALLOC_REG	0x3082c
+#define SOURCE_PIF_PKT_ALLOC_WR_EN	0x30830
+#define SOURCE_PIF_SW_RESET		0x30834
+
+/* the registers below must be accessed via the mailbox */
+/* source car addr */
+#define SOURCE_HDTX_CAR			0x0900
+#define SOURCE_DPTX_CAR			0x0904
+#define SOURCE_PHY_CAR			0x0908
+#define SOURCE_CEC_CAR			0x090c
+#define SOURCE_CBUS_CAR			0x0910
+#define SOURCE_PKT_CAR			0x0918
+#define SOURCE_AIF_CAR			0x091c
+#define SOURCE_CIPHER_CAR		0x0920
+#define SOURCE_CRYPTO_CAR		0x0924
+
+/* clock meters addr */
+#define CM_CTRL				0x0a00
+#define CM_I2S_CTRL			0x0a04
+#define CM_SPDIF_CTRL			0x0a08
+#define CM_VID_CTRL			0x0a0c
+#define CM_LANE_CTRL			0x0a10
+#define I2S_NM_STABLE			0x0a14
+#define I2S_NCTS_STABLE			0x0a18
+#define SPDIF_NM_STABLE			0x0a1c
+#define SPDIF_NCTS_STABLE		0x0a20
+#define NMVID_MEAS_STABLE		0x0a24
+#define I2S_MEAS			0x0a40
+#define SPDIF_MEAS			0x0a80
+#define NMVID_MEAS			0x0ac0
+
+/* source vif addr */
+#define BND_HSYNC2VSYNC			0x0b00
+#define HSYNC2VSYNC_F1_L1		0x0b04
+#define HSYNC2VSYNC_F2_L1		0x0b08
+#define HSYNC2VSYNC_STATUS		0x0b0c
+#define HSYNC2VSYNC_POL_CTRL		0x0b10
+
+/* dptx phy addr */
+#define DP_TX_PHY_CONFIG_REG		0x2000
+#define DP_TX_PHY_STATUS_REG		0x2004
+#define DP_TX_PHY_SW_RESET		0x2008
+#define DP_TX_PHY_SCRAMBLER_SEED	0x200c
+#define DP_TX_PHY_TRAINING_01_04	0x2010
+#define DP_TX_PHY_TRAINING_05_08	0x2014
+#define DP_TX_PHY_TRAINING_09_10	0x2018
+#define TEST_COR			0x23fc
+
+/* dptx hpd addr */
+#define HPD_IRQ_DET_MIN_TIMER		0x2100
+#define HPD_IRQ_DET_MAX_TIMER		0x2104
+#define HPD_UNPLGED_DET_MIN_TIMER	0x2108
+#define HPD_STABLE_TIMER		0x210c
+#define HPD_FILTER_TIMER		0x2110
+#define HPD_EVENT_MASK			0x211c
+#define HPD_EVENT_DET			0x2120
+
+/* dptx framer addr */
+#define DP_FRAMER_GLOBAL_CONFIG		0x2200
+#define DP_SW_RESET			0x2204
+#define DP_FRAMER_TU			0x2208
+#define DP_FRAMER_PXL_REPR		0x220c
+#define DP_FRAMER_SP			0x2210
+#define AUDIO_PACK_CONTROL		0x2214
+#define DP_VC_TABLE(x)			(0x2218 + ((x) << 2))
+#define DP_VB_ID			0x2258
+#define DP_MTPH_LVP_CONTROL		0x225c
+#define DP_MTPH_SYMBOL_VALUES		0x2260
+#define DP_MTPH_ECF_CONTROL		0x2264
+#define DP_MTPH_ACT_CONTROL		0x2268
+#define DP_MTPH_STATUS			0x226c
+#define DP_INTERRUPT_SOURCE		0x2270
+#define DP_INTERRUPT_MASK		0x2274
+#define DP_FRONT_BACK_PORCH		0x2278
+#define DP_BYTE_COUNT			0x227c
+
+/* dptx stream addr */
+#define MSA_HORIZONTAL_0		0x2280
+#define MSA_HORIZONTAL_1		0x2284
+#define MSA_VERTICAL_0			0x2288
+#define MSA_VERTICAL_1			0x228c
+#define MSA_MISC			0x2290
+#define STREAM_CONFIG			0x2294
+#define AUDIO_PACK_STATUS		0x2298
+#define VIF_STATUS			0x229c
+#define PCK_STUFF_STATUS_0		0x22a0
+#define PCK_STUFF_STATUS_1		0x22a4
+#define INFO_PACK_STATUS		0x22a8
+#define RATE_GOVERNOR_STATUS		0x22ac
+#define DP_HORIZONTAL			0x22b0
+#define DP_VERTICAL_0			0x22b4
+#define DP_VERTICAL_1			0x22b8
+#define DP_BLOCK_SDP			0x22bc
+
+/* dptx glbl addr */
+#define DPTX_LANE_EN			0x2300
+#define DPTX_ENHNCD			0x2304
+#define DPTX_INT_MASK			0x2308
+#define DPTX_INT_STATUS			0x230c
+
+/* dp aux addr */
+#define DP_AUX_HOST_CONTROL		0x2800
+#define DP_AUX_INTERRUPT_SOURCE		0x2804
+#define DP_AUX_INTERRUPT_MASK		0x2808
+#define DP_AUX_SWAP_INVERSION_CONTROL	0x280c
+#define DP_AUX_SEND_NACK_TRANSACTION	0x2810
+#define DP_AUX_CLEAR_RX			0x2814
+#define DP_AUX_CLEAR_TX			0x2818
+#define DP_AUX_TIMER_STOP		0x281c
+#define DP_AUX_TIMER_CLEAR		0x2820
+#define DP_AUX_RESET_SW			0x2824
+#define DP_AUX_DIVIDE_2M		0x2828
+#define DP_AUX_TX_PREACHARGE_LENGTH	0x282c
+#define DP_AUX_FREQUENCY_1M_MAX		0x2830
+#define DP_AUX_FREQUENCY_1M_MIN		0x2834
+#define DP_AUX_RX_PRE_MIN		0x2838
+#define DP_AUX_RX_PRE_MAX		0x283c
+#define DP_AUX_TIMER_PRESET		0x2840
+#define DP_AUX_NACK_FORMAT		0x2844
+#define DP_AUX_TX_DATA			0x2848
+#define DP_AUX_RX_DATA			0x284c
+#define DP_AUX_TX_STATUS		0x2850
+#define DP_AUX_RX_STATUS		0x2854
+#define DP_AUX_RX_CYCLE_COUNTER		0x2858
+#define DP_AUX_MAIN_STATES		0x285c
+#define DP_AUX_MAIN_TIMER		0x2860
+#define DP_AUX_AFE_OUT			0x2864
+
+/* crypto addr */
+#define CRYPTO_HDCP_REVISION		0x5800
+#define HDCP_CRYPTO_CONFIG		0x5804
+#define CRYPTO_INTERRUPT_SOURCE		0x5808
+#define CRYPTO_INTERRUPT_MASK		0x580c
+#define CRYPTO22_CONFIG			0x5818
+#define CRYPTO22_STATUS			0x581c
+#define SHA_256_DATA_IN			0x583c
+#define SHA_256_DATA_OUT_(x)		(0x5850 + ((x) << 2))
+#define AES_32_KEY_(x)			(0x5870 + ((x) << 2))
+#define AES_32_DATA_IN			0x5880
+#define AES_32_DATA_OUT_(x)		(0x5884 + ((x) << 2))
+#define CRYPTO14_CONFIG			0x58a0
+#define CRYPTO14_STATUS			0x58a4
+#define CRYPTO14_PRNM_OUT		0x58a8
+#define CRYPTO14_KM_0			0x58ac
+#define CRYPTO14_KM_1			0x58b0
+#define CRYPTO14_AN_0			0x58b4
+#define CRYPTO14_AN_1			0x58b8
+#define CRYPTO14_YOUR_KSV_0		0x58bc
+#define CRYPTO14_YOUR_KSV_1		0x58c0
+#define CRYPTO14_MI_0			0x58c4
+#define CRYPTO14_MI_1			0x58c8
+#define CRYPTO14_TI_0			0x58cc
+#define CRYPTO14_KI_0			0x58d0
+#define CRYPTO14_KI_1			0x58d4
+#define CRYPTO14_BLOCKS_NUM		0x58d8
+#define CRYPTO14_KEY_MEM_DATA_0		0x58dc
+#define CRYPTO14_KEY_MEM_DATA_1		0x58e0
+#define CRYPTO14_SHA1_MSG_DATA		0x58e4
+#define CRYPTO14_SHA1_V_VALUE_(x)	(0x58e8 + ((x) << 2))
+#define TRNG_CTRL			0x58fc
+#define TRNG_DATA_RDY			0x5900
+#define TRNG_DATA			0x5904
+
+/* cipher addr */
+#define HDCP_REVISION			0x60000
+#define INTERRUPT_SOURCE		0x60004
+#define INTERRUPT_MASK			0x60008
+#define HDCP_CIPHER_CONFIG		0x6000c
+#define AES_128_KEY_0			0x60010
+#define AES_128_KEY_1			0x60014
+#define AES_128_KEY_2			0x60018
+#define AES_128_KEY_3			0x6001c
+#define AES_128_RANDOM_0		0x60020
+#define AES_128_RANDOM_1		0x60024
+#define CIPHER14_KM_0			0x60028
+#define CIPHER14_KM_1			0x6002c
+#define CIPHER14_STATUS			0x60030
+#define CIPHER14_RI_PJ_STATUS		0x60034
+#define CIPHER_MODE			0x60038
+#define CIPHER14_AN_0			0x6003c
+#define CIPHER14_AN_1			0x60040
+#define CIPHER22_AUTH			0x60044
+#define CIPHER14_R0_DP_STATUS		0x60048
+#define CIPHER14_BOOTSTRAP		0x6004c
+
+#define DPTX_FRMR_DATA_CLK_RSTN_EN	BIT(11)
+#define DPTX_FRMR_DATA_CLK_EN		BIT(10)
+#define DPTX_PHY_DATA_RSTN_EN		BIT(9)
+#define DPTX_PHY_DATA_CLK_EN		BIT(8)
+#define DPTX_PHY_CHAR_RSTN_EN		BIT(7)
+#define DPTX_PHY_CHAR_CLK_EN		BIT(6)
+#define SOURCE_AUX_SYS_CLK_RSTN_EN	BIT(5)
+#define SOURCE_AUX_SYS_CLK_EN		BIT(4)
+#define DPTX_SYS_CLK_RSTN_EN		BIT(3)
+#define DPTX_SYS_CLK_EN			BIT(2)
+#define CFG_DPTX_VIF_CLK_RSTN_EN	BIT(1)
+#define CFG_DPTX_VIF_CLK_EN		BIT(0)
+
+#define SOURCE_PHY_RSTN_EN		BIT(1)
+#define SOURCE_PHY_CLK_EN		BIT(0)
+
+#define SOURCE_PKT_SYS_RSTN_EN		BIT(3)
+#define SOURCE_PKT_SYS_CLK_EN		BIT(2)
+#define SOURCE_PKT_DATA_RSTN_EN		BIT(1)
+#define SOURCE_PKT_DATA_CLK_EN		BIT(0)
+
+#define SPDIF_CDR_CLK_RSTN_EN		BIT(5)
+#define SPDIF_CDR_CLK_EN		BIT(4)
+#define SOURCE_AIF_SYS_RSTN_EN		BIT(3)
+#define SOURCE_AIF_SYS_CLK_EN		BIT(2)
+#define SOURCE_AIF_CLK_RSTN_EN		BIT(1)
+#define SOURCE_AIF_CLK_EN		BIT(0)
+
+#define SOURCE_CIPHER_SYSTEM_CLK_RSTN_EN	BIT(3)
+#define SOURCE_CIPHER_SYS_CLK_EN		BIT(2)
+#define SOURCE_CIPHER_CHAR_CLK_RSTN_EN		BIT(1)
+#define SOURCE_CIPHER_CHAR_CLK_EN		BIT(0)
+
+#define SOURCE_CRYPTO_SYS_CLK_RSTN_EN	BIT(1)
+#define SOURCE_CRYPTO_SYS_CLK_EN	BIT(0)
+
+#define APB_IRAM_PATH			BIT(2)
+#define APB_DRAM_PATH			BIT(1)
+#define APB_XT_RESET			BIT(0)
+
+#define MAILBOX_INT_MASK_BIT		BIT(1)
+#define PIF_INT_MASK_BIT		BIT(0)
+#define ALL_INT_MASK			3
+
+/* mailbox */
+#define MB_OPCODE_ID			0
+#define MB_MODULE_ID			1
+#define MB_SIZE_MSB_ID			2
+#define MB_SIZE_LSB_ID			3
+#define MB_DATA_ID			4
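+
+/*
+ * Mailbox messages are framed using the indices above: byte 0 opcode,
+ * byte 1 module ID, bytes 2-3 payload size (MSB then LSB), payload
+ * from byte 4 onwards.
+ */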
+
+#define MB_MODULE_ID_DP_TX		0x01
+#define MB_MODULE_ID_HDCP_TX		0x07
+#define MB_MODULE_ID_HDCP_RX		0x08
+#define MB_MODULE_ID_HDCP_GENERAL	0x09
+#define MB_MODULE_ID_GENERAL		0x0a
+
+/* general opcode */
+#define GENERAL_MAIN_CONTROL            0x01
+#define GENERAL_TEST_ECHO               0x02
+#define GENERAL_BUS_SETTINGS            0x03
+#define GENERAL_TEST_ACCESS             0x04
+
+#define DPTX_SET_POWER_MNG			0x00
+#define DPTX_SET_HOST_CAPABILITIES		0x01
+#define DPTX_GET_EDID				0x02
+#define DPTX_READ_DPCD				0x03
+#define DPTX_WRITE_DPCD				0x04
+#define DPTX_ENABLE_EVENT			0x05
+#define DPTX_WRITE_REGISTER			0x06
+#define DPTX_READ_REGISTER			0x07
+#define DPTX_WRITE_FIELD			0x08
+#define DPTX_TRAINING_CONTROL			0x09
+#define DPTX_READ_EVENT				0x0a
+#define DPTX_READ_LINK_STAT			0x0b
+#define DPTX_SET_VIDEO				0x0c
+#define DPTX_SET_AUDIO				0x0d
+#define DPTX_GET_LAST_AUX_STAUS			0x0e
+#define DPTX_SET_LINK_BREAK_POINT		0x0f
+#define DPTX_FORCE_LANES			0x10
+#define DPTX_HPD_STATE				0x11
+
+#define FW_STANDBY				0
+#define FW_ACTIVE				1
+
+#define DPTX_EVENT_ENABLE_HPD			BIT(0)
+#define DPTX_EVENT_ENABLE_TRAINING		BIT(1)
+
+#define LINK_TRAINING_NOT_ACTIVE		0
+#define LINK_TRAINING_RUN			1
+#define LINK_TRAINING_RESTART			2
+
+#define CONTROL_VIDEO_IDLE			0
+#define CONTROL_VIDEO_VALID			1
+
+#define TU_CNT_RST_EN				BIT(15)
+#define VIF_BYPASS_INTERLACE			BIT(13)
+#define INTERLACE_FMT_DET			BIT(12)
+#define INTERLACE_DTCT_WIN			0x20
+
+#define DP_FRAMER_SP_INTERLACE_EN		BIT(2)
+#define DP_FRAMER_SP_HSP			BIT(1)
+#define DP_FRAMER_SP_VSP			BIT(0)
+
+/* capability */
+#define AUX_HOST_INVERT				3
+#define FAST_LT_SUPPORT				1
+#define FAST_LT_NOT_SUPPORT			0
+#define LANE_MAPPING_NORMAL			0x1b
+#define LANE_MAPPING_FLIPPED			0xe4
+#define ENHANCED				1
+#define SCRAMBLER_EN				BIT(4)
+
+#define FULL_LT_STARTED				BIT(0)
+#define FASE_LT_STARTED				BIT(1)
+#define CLK_RECOVERY_FINISHED			BIT(2)
+#define EQ_PHASE_FINISHED			BIT(3)
+#define FASE_LT_START_FINISHED			BIT(4)
+#define CLK_RECOVERY_FAILED			BIT(5)
+#define EQ_PHASE_FAILED				BIT(6)
+#define FASE_LT_FAILED				BIT(7)
+
+#define DPTX_HPD_EVENT				BIT(0)
+#define DPTX_TRAINING_EVENT			BIT(1)
+#define HDCP_TX_STATUS_EVENT			BIT(4)
+#define HDCP2_TX_IS_KM_STORED_EVENT		BIT(5)
+#define HDCP2_TX_STORE_KM_EVENT			BIT(6)
+#define HDCP_TX_IS_RECEIVER_ID_VALID_EVENT	BIT(7)
+
+#define TU_SIZE					30
+#define CDN_DP_MAX_LINK_RATE			DP_LINK_BW_5_4
+
+/* audio */
+#define AUDIO_PACK_EN				BIT(8)
+#define SAMPLING_FREQ(x)			(((x) & 0xf) << 16)
+#define ORIGINAL_SAMP_FREQ(x)			(((x) & 0xf) << 24)
+#define SYNC_WR_TO_CH_ZERO			BIT(1)
+#define I2S_DEC_START				BIT(1)
+#define AUDIO_SW_RST				BIT(0)
+#define SMPL2PKT_EN				BIT(1)
+#define MAX_NUM_CH(x)				(((x) & 0x1f) - 1)
+#define NUM_OF_I2S_PORTS(x)			((((x) / 2 - 1) & 0x3) << 5)
+#define AUDIO_TYPE_LPCM				(2 << 7)
+#define CFG_SUB_PCKT_NUM(x)			((((x) - 1) & 0x7) << 11)
+#define AUDIO_CH_NUM(x)				((((x) - 1) & 0x1f) << 2)
+#define TRANS_SMPL_WIDTH_16			0
+#define TRANS_SMPL_WIDTH_24			BIT(11)
+#define TRANS_SMPL_WIDTH_32			(2 << 11)
+#define I2S_DEC_PORT_EN(x)			(((x) & 0xf) << 17)
+#define SPDIF_ENABLE				BIT(21)
+#define SPDIF_AVG_SEL				BIT(20)
+#define SPDIF_JITTER_BYPASS			BIT(19)
+#define SPDIF_FIFO_MID_RANGE(x)			(((x) & 0xff) << 11)
+#define SPDIF_JITTER_THRSH(x)			(((x) & 0xff) << 3)
+#define SPDIF_JITTER_AVG_WIN(x)			((x) & 0x7)
+
+/* Reference cycles when using lane clock as reference */
+#define LANE_REF_CYC				0x8000
+
+enum voltage_swing_level {
+	VOLTAGE_LEVEL_0,
+	VOLTAGE_LEVEL_1,
+	VOLTAGE_LEVEL_2,
+	VOLTAGE_LEVEL_3,
+};
+
+enum pre_emphasis_level {
+	PRE_EMPHASIS_LEVEL_0,
+	PRE_EMPHASIS_LEVEL_1,
+	PRE_EMPHASIS_LEVEL_2,
+	PRE_EMPHASIS_LEVEL_3,
+};
+
+enum pattern_set {
+	PTS1		= BIT(0),
+	PTS2		= BIT(1),
+	PTS3		= BIT(2),
+	PTS4		= BIT(3),
+	DP_NONE		= BIT(4)
+};
+
+enum vic_color_depth {
+	BCS_6 = 0x1,
+	BCS_8 = 0x2,
+	BCS_10 = 0x4,
+	BCS_12 = 0x8,
+	BCS_16 = 0x10,
+};
+
+enum vic_bt_type {
+	BT_601 = 0x0,
+	BT_709 = 0x1,
+};
+
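+/*
+ * Rough bring-up order used by the core driver (a sketch derived from
+ * the API below, not a contract): cdn_dp_load_firmware(),
+ * cdn_dp_set_firmware_active(), cdn_dp_set_host_cap(),
+ * cdn_dp_event_config(), then per mode cdn_dp_train_link(),
+ * cdn_dp_config_video() and cdn_dp_set_video_status().
+ */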
+void cdn_dp_clock_reset(struct cdn_dp_device *dp);
+
+void cdn_dp_set_fw_clk(struct cdn_dp_device *dp, u32 clk);
+int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem,
+			 u32 i_size, const u32 *d_mem, u32 d_size);
+int cdn_dp_set_firmware_active(struct cdn_dp_device *dp, bool enable);
+int cdn_dp_set_host_cap(struct cdn_dp_device *dp, u8 lanes, bool flip);
+int cdn_dp_event_config(struct cdn_dp_device *dp);
+u32 cdn_dp_get_event(struct cdn_dp_device *dp);
+int cdn_dp_get_hpd_status(struct cdn_dp_device *dp);
+int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value);
+int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len);
+int cdn_dp_get_edid_block(void *dp, u8 *edid,
+			  unsigned int block, size_t length);
+int cdn_dp_train_link(struct cdn_dp_device *dp);
+int cdn_dp_set_video_status(struct cdn_dp_device *dp, int active);
+int cdn_dp_config_video(struct cdn_dp_device *dp);
+int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio);
+int cdn_dp_audio_mute(struct cdn_dp_device *dp, bool enable);
+int cdn_dp_audio_config(struct cdn_dp_device *dp, struct audio_info *audio);
+#endif /* _CDN_DP_REG_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index d5e1f8627d38..c9ccdf8f44bb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -213,7 +213,7 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
 
 	rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
 	if (IS_ERR(rockchip_fb))
-		return NULL;
+		return ERR_CAST(rockchip_fb);
 
 	return &rockchip_fb->fb;
 }
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 52d1fdf9f9da..70ad50dd594d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -129,19 +129,16 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
 {
 	struct rockchip_drm_private *private = dev->dev_private;
 	struct drm_fb_helper *helper;
-	unsigned int num_crtc;
 	int ret;
 
 	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
 		return -EINVAL;
 
-	num_crtc = dev->mode_config.num_crtc;
-
 	helper = &private->fbdev_helper;
 
 	drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, helper, num_crtc, ROCKCHIP_MAX_CONNECTOR);
+	ret = drm_fb_helper_init(dev, helper, ROCKCHIP_MAX_CONNECTOR);
 	if (ret < 0) {
 		dev_err(dev->dev, "Failed to initialize drm fb helper - %d.\n",
 			ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index fb5f001f51c3..76c79ac57df0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -531,6 +531,8 @@ static int vop_enable(struct drm_crtc *crtc)
 	}
 
 	memcpy(vop->regs, vop->regsbak, vop->len);
+	vop_cfg_done(vop);
+
 	/*
 	 * At here, vop clock & iommu is enable, R/W vop regs would be safe.
 	 */
@@ -582,6 +584,8 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
 		spin_unlock(&vop->reg_lock);
 	}
 
+	vop_cfg_done(vop);
+
 	drm_crtc_vblank_off(crtc);
 
 	/*
@@ -932,9 +936,11 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
 		vop_dsp_hold_valid_irq_disable(vop);
 	}
 
-	pin_pol = 0x8;
-	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
-	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
+	pin_pol = BIT(DCLK_INVERT);
+	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ?
+		   0 : BIT(HSYNC_POSITIVE);
+	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ?
+		   0 : BIT(VSYNC_POSITIVE);
 	VOP_CTRL_SET(vop, pin_pol, pin_pol);
 
 	switch (s->output_type) {
@@ -954,6 +960,11 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
 		VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
 		VOP_CTRL_SET(vop, mipi_en, 1);
 		break;
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		pin_pol &= ~BIT(DCLK_INVERT);
+		VOP_CTRL_SET(vop, dp_pin_pol, pin_pol);
+		VOP_CTRL_SET(vop, dp_en, 1);
+		break;
 	default:
 		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
 			      s->output_type);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 1dbc52615257..5a4faa85dbd2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -45,6 +45,7 @@ struct vop_ctrl {
 	struct vop_reg edp_en;
 	struct vop_reg hdmi_en;
 	struct vop_reg mipi_en;
+	struct vop_reg dp_en;
 	struct vop_reg out_mode;
 	struct vop_reg dither_down;
 	struct vop_reg dither_up;
@@ -53,6 +54,7 @@ struct vop_ctrl {
 	struct vop_reg hdmi_pin_pol;
 	struct vop_reg edp_pin_pol;
 	struct vop_reg mipi_pin_pol;
+	struct vop_reg dp_pin_pol;
 
 	struct vop_reg htotal_pw;
 	struct vop_reg hact_st_end;
@@ -244,6 +246,13 @@ enum scale_down_mode {
 	SCALE_DOWN_AVG = 0x1
 };
 
+enum vop_pol {
+	HSYNC_POSITIVE = 0,
+	VSYNC_POSITIVE = 1,
+	DEN_NEGATIVE   = 2,
+	DCLK_INVERT    = 3
+};
+
 #define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
 #define SCL_FT_DEFAULT_FIXPOINT_SHIFT	12
 #define SCL_MAX_VSKIPLINES		4
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 35c51f3402f2..91fbc7b52147 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -284,6 +284,7 @@ static const struct vop_data rk3288_vop = {
 static const struct vop_ctrl rk3399_ctrl_data = {
 	.standby = VOP_REG(RK3399_SYS_CTRL, 0x1, 22),
 	.gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23),
+	.dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
 	.rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12),
 	.hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13),
 	.edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14),
@@ -293,6 +294,7 @@ static const struct vop_ctrl rk3399_ctrl_data = {
 	.data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19),
 	.out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0),
 	.rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
+	.dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
 	.hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 20),
 	.edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 24),
 	.mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 28),
diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
index 6a4575fdc1c0..37bbdac52896 100644
--- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h
@@ -17,6 +17,7 @@ selftest(align32, igt_align32)
 selftest(align64, igt_align64)
 selftest(evict, igt_evict)
 selftest(evict_range, igt_evict_range)
+selftest(bottomup, igt_bottomup)
 selftest(topdown, igt_topdown)
 selftest(color, igt_color)
 selftest(color_evict, igt_color_evict)
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 6df53e6c1308..1e71bc182ca9 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -22,23 +22,24 @@ static unsigned int max_iterations = 8192;
 static unsigned int max_prime = 128;
 
 enum {
-	DEFAULT,
-	TOPDOWN,
 	BEST,
+	BOTTOMUP,
+	TOPDOWN,
+	EVICT,
 };
 
 static const struct insert_mode {
 	const char *name;
-	unsigned int search_flags;
-	unsigned int create_flags;
+	enum drm_mm_insert_mode mode;
 } insert_modes[] = {
-	[DEFAULT] = { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
-	[TOPDOWN] = { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
-	[BEST] = { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
+	[BEST] = { "best", DRM_MM_INSERT_BEST },
+	[BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
+	[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
+	[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
 	{}
 }, evict_modes[] = {
-	{ "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
-	{ "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
+	{ "bottom-up", DRM_MM_INSERT_LOW },
+	{ "top-down", DRM_MM_INSERT_HIGH },
 	{}
 };
 
@@ -526,8 +527,7 @@ static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
 
 	err = drm_mm_insert_node_generic(mm, node,
 					 size, alignment, color,
-					 mode->search_flags,
-					 mode->create_flags);
+					 mode->mode);
 	if (err) {
 		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
 		       size, alignment, color, mode->name, err);
@@ -547,7 +547,7 @@ static bool expect_insert_fail(struct drm_mm *mm, u64 size)
 	struct drm_mm_node tmp = {};
 	int err;
 
-	err = drm_mm_insert_node(mm, &tmp, size, 0, DRM_MM_SEARCH_DEFAULT);
+	err = drm_mm_insert_node(mm, &tmp, size);
 	if (likely(err == -ENOSPC))
 		return true;
 
@@ -753,11 +753,10 @@ static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
 {
 	int err;
 
-	err = drm_mm_insert_node_in_range_generic(mm, node,
-						  size, alignment, color,
-						  range_start, range_end,
-						  mode->search_flags,
-						  mode->create_flags);
+	err = drm_mm_insert_node_in_range(mm, node,
+					  size, alignment, color,
+					  range_start, range_end,
+					  mode->mode);
 	if (err) {
 		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
 		       size, alignment, color, mode->name,
@@ -781,11 +780,10 @@ static bool expect_insert_in_range_fail(struct drm_mm *mm,
 	struct drm_mm_node tmp = {};
 	int err;
 
-	err = drm_mm_insert_node_in_range_generic(mm, &tmp,
-						  size, 0, 0,
-						  range_start, range_end,
-						  DRM_MM_SEARCH_DEFAULT,
-						  DRM_MM_CREATE_DEFAULT);
+	err = drm_mm_insert_node_in_range(mm, &tmp,
+					  size, 0, 0,
+					  range_start, range_end,
+					  0);
 	if (likely(err == -ENOSPC))
 		return true;
 
@@ -1324,7 +1322,7 @@ static int evict_something(struct drm_mm *mm,
 	drm_mm_scan_init_with_range(&scan, mm,
 				    size, alignment, 0,
 				    range_start, range_end,
-				    mode->create_flags);
+				    mode->mode);
 	if (!evict_nodes(&scan,
 			 nodes, order, count, false,
 			 &evict_list))
@@ -1332,8 +1330,7 @@ static int evict_something(struct drm_mm *mm,
 
 	memset(&tmp, 0, sizeof(tmp));
 	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
-					 mode->search_flags,
-					 mode->create_flags);
+					 DRM_MM_INSERT_EVICT);
 	if (err) {
 		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
 		       size, alignment);
@@ -1408,8 +1405,7 @@ static int igt_evict(void *ignored)
 	ret = -EINVAL;
 	drm_mm_init(&mm, 0, size);
 	for (n = 0; n < size; n++) {
-		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
-					 DRM_MM_SEARCH_DEFAULT);
+		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
 		if (err) {
 			pr_err("insert failed, step %d\n", n);
 			ret = err;
@@ -1517,8 +1513,7 @@ static int igt_evict_range(void *ignored)
 	ret = -EINVAL;
 	drm_mm_init(&mm, 0, size);
 	for (n = 0; n < size; n++) {
-		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
-					 DRM_MM_SEARCH_DEFAULT);
+		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
 		if (err) {
 			pr_err("insert failed, step %d\n", n);
 			ret = err;
@@ -1702,6 +1697,106 @@ err:
 	return ret;
 }
 
+static int igt_bottomup(void *ignored)
+{
+	const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
+	DRM_RND_STATE(prng, random_seed);
+	const unsigned int count = 8192;
+	unsigned int size;
+	unsigned long *bitmap;
+	struct drm_mm mm;
+	struct drm_mm_node *nodes, *node, *next;
+	unsigned int *order, n, m, o = 0;
+	int ret;
+
+	/* Like igt_topdown, but instead of searching for the last hole,
+	 * we search for the first.
+	 */
+
+	ret = -ENOMEM;
+	nodes = vzalloc(count * sizeof(*nodes));
+	if (!nodes)
+		goto err;
+
+	bitmap = kzalloc(BITS_TO_LONGS(count) * sizeof(unsigned long),
+			 GFP_TEMPORARY);
+	if (!bitmap)
+		goto err_nodes;
+
+	order = drm_random_order(count, &prng);
+	if (!order)
+		goto err_bitmap;
+
+	ret = -EINVAL;
+	for (size = 1; size <= 64; size <<= 1) {
+		drm_mm_init(&mm, 0, size*count);
+		for (n = 0; n < count; n++) {
+			if (!expect_insert(&mm, &nodes[n],
+					   size, 0, n,
+					   bottomup)) {
+				pr_err("bottomup insert failed, size %u step %d\n", size, n);
+				goto out;
+			}
+
+			if (!assert_one_hole(&mm, size*(n + 1), size*count))
+				goto out;
+		}
+
+		if (!assert_continuous(&mm, size))
+			goto out;
+
+		drm_random_reorder(order, count, &prng);
+		for_each_prime_number_from(n, 1, min(count, max_prime)) {
+			for (m = 0; m < n; m++) {
+				node = &nodes[order[(o + m) % count]];
+				drm_mm_remove_node(node);
+				__set_bit(node_index(node), bitmap);
+			}
+
+			for (m = 0; m < n; m++) {
+				unsigned int first;
+
+				node = &nodes[order[(o + m) % count]];
+				if (!expect_insert(&mm, node,
+						   size, 0, 0,
+						   bottomup)) {
+					pr_err("insert failed, step %d/%d\n", m, n);
+					goto out;
+				}
+
+				first = find_first_bit(bitmap, count);
+				if (node_index(node) != first) {
+					pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
+					       m, n, first, node_index(node));
+					goto out;
+				}
+				__clear_bit(first, bitmap);
+			}
+
+			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
+
+			o += n;
+		}
+
+		drm_mm_for_each_node_safe(node, next, &mm)
+			drm_mm_remove_node(node);
+		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+	}
+
+	ret = 0;
+out:
+	drm_mm_for_each_node_safe(node, next, &mm)
+		drm_mm_remove_node(node);
+	drm_mm_takedown(&mm);
+	kfree(order);
+err_bitmap:
+	kfree(bitmap);
+err_nodes:
+	vfree(nodes);
+err:
+	return ret;
+}
+
 static void separate_adjacent_colors(const struct drm_mm_node *node,
 				     unsigned long color,
 				     u64 *start,
@@ -1904,7 +1999,7 @@ static int evict_color(struct drm_mm *mm,
 	drm_mm_scan_init_with_range(&scan, mm,
 				    size, alignment, color,
 				    range_start, range_end,
-				    mode->create_flags);
+				    mode->mode);
 	if (!evict_nodes(&scan,
 			 nodes, order, count, true,
 			 &evict_list))
@@ -1912,8 +2007,7 @@ static int evict_color(struct drm_mm *mm,
 
 	memset(&tmp, 0, sizeof(tmp));
 	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
-					 mode->search_flags,
-					 mode->create_flags);
+					 DRM_MM_INSERT_EVICT);
 	if (err) {
 		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
 		       size, alignment, color, err);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 03defda77766..1622db24cd39 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -109,8 +109,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 	if (pool == AGP_TYPE) {
 		retval = drm_mm_insert_node(&dev_priv->agp_mm,
 					    &item->mm_node,
-					    mem->size, 0,
-					    DRM_MM_SEARCH_DEFAULT);
+					    mem->size);
 		offset = item->mm_node.start;
 	} else {
 #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -122,8 +121,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 #else
 		retval = drm_mm_insert_node(&dev_priv->vram_mm,
 					    &item->mm_node,
-					    mem->size, 0,
-					    DRM_MM_SEARCH_DEFAULT);
+					    mem->size);
 		offset = item->mm_node.start;
 #endif
 	}
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index acc056644cd0..788feed208d7 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -360,7 +360,7 @@ static int sti_bind(struct device *dev)
 
 	private = ddev->dev_private;
 	if (ddev->mode_config.num_connector) {
-		fbdev = drm_fbdev_cma_init(ddev, 32, ddev->mode_config.num_crtc,
+		fbdev = drm_fbdev_cma_init(ddev, 32,
 					   ddev->mode_config.num_connector);
 		if (IS_ERR(fbdev)) {
 			DRM_DEBUG_DRIVER("Warning: fails to create fbdev\n");
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 8b6ce619ad81..2c3beff8b53e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -40,9 +40,7 @@ struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm)
 
 	drm->mode_config.funcs = &sun4i_de_mode_config_funcs;
 
-	return drm_fbdev_cma_init(drm, 32,
-				  drm->mode_config.num_crtc,
-				  drm->mode_config.num_connector);
+	return drm_fbdev_cma_init(drm, 32, drm->mode_config.num_connector);
 }
 
 void sun4i_framebuffer_free(struct drm_device *drm)
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index f896e2ff7d47..f142f6a4db25 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -309,7 +309,7 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
 	struct drm_device *drm = fbdev->base.dev;
 	int err;
 
-	err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
+	err = drm_fb_helper_init(drm, &fbdev->base, max_connectors);
 	if (err < 0) {
 		dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n",
 			err);
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 7d853e6b5ff0..b523a5d4a38c 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -128,8 +128,8 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
 	if (!bo->mm)
 		return -ENOMEM;
 
-	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
-					 PAGE_SIZE, 0, 0, 0);
+	err = drm_mm_insert_node_generic(&tegra->mm,
+					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
 	if (err < 0) {
 		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
 			err);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 919294a735fe..372d86fbb093 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -403,8 +403,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
 	drm_mode_config_reset(ddev);
 
 	priv->fbdev = drm_fbdev_cma_init(ddev, bpp,
-			ddev->mode_config.num_crtc,
-			ddev->mode_config.num_connector);
+					 ddev->mode_config.num_connector);
 	if (IS_ERR(priv->fbdev)) {
 		ret = PTR_ERR(priv->fbdev);
 		goto init_failed;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 988c48d1cf3e..90a6c0b03afc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -54,9 +54,8 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 {
 	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 	struct drm_mm *mm = &rman->mm;
-	struct drm_mm_node *node = NULL;
-	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
-	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	struct drm_mm_node *node;
+	enum drm_mm_insert_mode mode;
 	unsigned long lpfn;
 	int ret;
 
@@ -68,16 +67,15 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	if (!node)
 		return -ENOMEM;
 
-	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
-		sflags = DRM_MM_SEARCH_BELOW;
-		aflags = DRM_MM_CREATE_TOP;
-	}
+	mode = DRM_MM_INSERT_BEST;
+	if (place->flags & TTM_PL_FLAG_TOPDOWN)
+		mode = DRM_MM_INSERT_HIGH;
 
 	spin_lock(&rman->lock);
-	ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
+	ret = drm_mm_insert_node_in_range(mm, node,
+					  mem->num_pages,
 					  mem->page_alignment, 0,
-					  place->fpfn, lpfn,
-					  sflags, aflags);
+					  place->fpfn, lpfn, mode);
 	spin_unlock(&rman->lock);
 
 	if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index b8dc06d68777..8e8d60e9a1a2 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -441,8 +441,7 @@ int udl_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, &ufbdev->helper,
-				 1, 1);
+	ret = drm_fb_helper_init(dev, &ufbdev->helper, 1);
 	if (ret)
 		goto free;
 
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index e53df59cb139..e1517d07cb7d 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -2,10 +2,12 @@ config DRM_VC4
 	tristate "Broadcom VC4 Graphics"
 	depends on ARCH_BCM2835 || COMPILE_TEST
 	depends on DRM
+	depends on COMMON_CLK
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
 	select DRM_PANEL
+	select DRM_MIPI_DSI
 	help
 	  Choose this option if you have a system that has a Broadcom
 	  VC4 GPU, such as the Raspberry Pi or other BCM2708/BCM2835.
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 7757f69a8a77..61f45d122bd0 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -8,6 +8,7 @@ vc4-y := \
 	vc4_crtc.o \
 	vc4_drv.o \
 	vc4_dpi.o \
+	vc4_dsi.o \
 	vc4_kms.o \
 	vc4_gem.o \
 	vc4_hdmi.o \
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 75b708b36890..a0cd4ea15f07 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -348,38 +348,40 @@ static u32 vc4_get_fifo_full_level(u32 format)
 }
 
 /*
- * Returns the clock select bit for the connector attached to the
- * CRTC.
+ * Returns the encoder attached to the CRTC.
+ *
+ * VC4 can only scan out to one encoder at a time, while the DRM core
+ * allows drivers to push pixels to more than one encoder from the
+ * same CRTC.
  */
-static int vc4_get_clock_select(struct drm_crtc *crtc)
+static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc)
 {
 	struct drm_connector *connector;
 
 	drm_for_each_connector(connector, crtc->dev) {
 		if (connector->state->crtc == crtc) {
-			struct drm_encoder *encoder = connector->encoder;
-			struct vc4_encoder *vc4_encoder =
-				to_vc4_encoder(encoder);
-
-			return vc4_encoder->clock_select;
+			return connector->encoder;
 		}
 	}
 
-	return -1;
+	return NULL;
 }
 
 static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
+	struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 	struct drm_crtc_state *state = crtc->state;
 	struct drm_display_mode *mode = &state->adjusted_mode;
 	bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
 	u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
-	u32 format = PV_CONTROL_FORMAT_24;
+	bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
+		       vc4_encoder->type == VC4_ENCODER_TYPE_DSI1);
+	u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
 	bool debug_dump_regs = false;
-	int clock_select = vc4_get_clock_select(crtc);
 
 	if (debug_dump_regs) {
 		DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
@@ -435,17 +437,19 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
 		 */
 		CRTC_WRITE(PV_V_CONTROL,
 			   PV_VCONTROL_CONTINUOUS |
+			   (is_dsi ? PV_VCONTROL_DSI : 0) |
 			   PV_VCONTROL_INTERLACE |
 			   VC4_SET_FIELD(mode->htotal * pixel_rep / 2,
 					 PV_VCONTROL_ODD_DELAY));
 		CRTC_WRITE(PV_VSYNCD_EVEN, 0);
 	} else {
-		CRTC_WRITE(PV_V_CONTROL, PV_VCONTROL_CONTINUOUS);
+		CRTC_WRITE(PV_V_CONTROL,
+			   PV_VCONTROL_CONTINUOUS |
+			   (is_dsi ? PV_VCONTROL_DSI : 0));
 	}
 
 	CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
 
-
 	CRTC_WRITE(PV_CONTROL,
 		   VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
 		   VC4_SET_FIELD(vc4_get_fifo_full_level(format),
@@ -454,7 +458,8 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
 		   PV_CONTROL_CLR_AT_START |
 		   PV_CONTROL_TRIGGER_UNDERFLOW |
 		   PV_CONTROL_WAIT_HSTART |
-		   VC4_SET_FIELD(clock_select, PV_CONTROL_CLK_SELECT) |
+		   VC4_SET_FIELD(vc4_encoder->clock_select,
+				 PV_CONTROL_CLK_SELECT) |
 		   PV_CONTROL_FIFO_CLR |
 		   PV_CONTROL_EN);
 
@@ -588,7 +593,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 
 	spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
 	ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
-				 dlist_count, 1, 0);
+				 dlist_count);
 	spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index c4d5e6a8d6f2..5db06bdb5f27 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -18,6 +18,7 @@
 static const struct drm_info_list vc4_debugfs_list[] = {
 	{"bo_stats", vc4_bo_stats_debugfs, 0},
 	{"dpi_regs", vc4_dpi_debugfs_regs, 0},
+	{"dsi1_regs", vc4_dsi_debugfs_regs, 0, (void *)(uintptr_t)1},
 	{"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
 	{"vec_regs", vc4_vec_debugfs_regs, 0},
 	{"hvs_regs", vc4_hvs_debugfs_regs, 0},
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index e4f42ebee517..a459745e96f7 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -295,6 +295,7 @@ static struct platform_driver *const component_drivers[] = {
 	&vc4_hdmi_driver,
 	&vc4_vec_driver,
 	&vc4_dpi_driver,
+	&vc4_dsi_driver,
 	&vc4_hvs_driver,
 	&vc4_crtc_driver,
 	&vc4_v3d_driver,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 78e3e5a43fb0..0e59f3ee1b83 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -18,6 +18,7 @@ struct vc4_dev {
 	struct vc4_hvs *hvs;
 	struct vc4_v3d *v3d;
 	struct vc4_dpi *dpi;
+	struct vc4_dsi *dsi1;
 	struct vc4_vec *vec;
 
 	struct drm_fbdev_cma *fbdev;
@@ -465,6 +466,10 @@ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
 extern struct platform_driver vc4_dpi_driver;
 int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);
 
+/* vc4_dsi.c */
+extern struct platform_driver vc4_dsi_driver;
+int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);
+
 /* vc4_gem.c */
 void vc4_gem_init(struct drm_device *dev);
 void vc4_gem_destroy(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
new file mode 100644
index 000000000000..2736b0331beb
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -0,0 +1,1725 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ * DOC: VC4 DSI0/DSI1 module
+ *
+ * BCM2835 contains two DSI modules, DSI0 and DSI1.  DSI0 is a
+ * single-lane DSI controller, while DSI1 is a more modern 4-lane DSI
+ * controller.
+ *
+ * Most Raspberry Pi boards expose DSI1 as their "DISPLAY" connector,
+ * while the compute module brings both DSI0 and DSI1 out.
+ *
+ * Currently this driver has only been tested with DSI1 video-mode
+ * displays; most of the information needed to support DSI0 is
+ * hopefully present as well.
+ */
+
+#include "drm_atomic_helper.h"
+#include "drm_crtc_helper.h"
+#include "drm_edid.h"
+#include "drm_mipi_dsi.h"
+#include "drm_panel.h"
+#include "linux/clk.h"
+#include "linux/clk-provider.h"
+#include "linux/completion.h"
+#include "linux/component.h"
+#include "linux/dmaengine.h"
+#include "linux/i2c.h"
+#include "linux/of_address.h"
+#include "linux/of_platform.h"
+#include "linux/pm_runtime.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#define DSI_CMD_FIFO_DEPTH  16
+#define DSI_PIX_FIFO_DEPTH 256
+#define DSI_PIX_FIFO_WIDTH   4
+
+#define DSI0_CTRL		0x00
+
+/* Command packet control. */
+#define DSI0_TXPKT1C		0x04 /* AKA PKTC */
+#define DSI1_TXPKT1C		0x04
+# define DSI_TXPKT1C_TRIG_CMD_MASK	VC4_MASK(31, 24)
+# define DSI_TXPKT1C_TRIG_CMD_SHIFT	24
+# define DSI_TXPKT1C_CMD_REPEAT_MASK	VC4_MASK(23, 10)
+# define DSI_TXPKT1C_CMD_REPEAT_SHIFT	10
+
+# define DSI_TXPKT1C_DISPLAY_NO_MASK	VC4_MASK(9, 8)
+# define DSI_TXPKT1C_DISPLAY_NO_SHIFT	8
+/* Short, trigger, BTA, or a long packet that fits entirely in the CMDFIFO. */
+# define DSI_TXPKT1C_DISPLAY_NO_SHORT		0
+/* Primary display where cmdfifo provides part of the payload and
+ * pixelvalve the rest.
+ */
+# define DSI_TXPKT1C_DISPLAY_NO_PRIMARY		1
+/* Secondary display where cmdfifo provides part of the payload and
+ * pixfifo the rest.
+ */
+# define DSI_TXPKT1C_DISPLAY_NO_SECONDARY	2
+
+# define DSI_TXPKT1C_CMD_TX_TIME_MASK	VC4_MASK(7, 6)
+# define DSI_TXPKT1C_CMD_TX_TIME_SHIFT	6
+
+# define DSI_TXPKT1C_CMD_CTRL_MASK	VC4_MASK(5, 4)
+# define DSI_TXPKT1C_CMD_CTRL_SHIFT	4
+/* Command only.  Uses TXPKT1H and DISPLAY_NO */
+# define DSI_TXPKT1C_CMD_CTRL_TX	0
+/* Command with BTA for either ack or read data. */
+# define DSI_TXPKT1C_CMD_CTRL_RX	1
+/* Trigger according to TRIG_CMD */
+# define DSI_TXPKT1C_CMD_CTRL_TRIG	2
+/* BTA alone for getting error status after a command, or a TE trigger
+ * without a previous command.
+ */
+# define DSI_TXPKT1C_CMD_CTRL_BTA	3
+
+# define DSI_TXPKT1C_CMD_MODE_LP	BIT(3)
+# define DSI_TXPKT1C_CMD_TYPE_LONG	BIT(2)
+# define DSI_TXPKT1C_CMD_TE_EN		BIT(1)
+# define DSI_TXPKT1C_CMD_EN		BIT(0)
+
+/* Command packet header. */
+#define DSI0_TXPKT1H		0x08 /* AKA PKTH */
+#define DSI1_TXPKT1H		0x08
+# define DSI_TXPKT1H_BC_CMDFIFO_MASK	VC4_MASK(31, 24)
+# define DSI_TXPKT1H_BC_CMDFIFO_SHIFT	24
+# define DSI_TXPKT1H_BC_PARAM_MASK	VC4_MASK(23, 8)
+# define DSI_TXPKT1H_BC_PARAM_SHIFT	8
+# define DSI_TXPKT1H_BC_DT_MASK		VC4_MASK(7, 0)
+# define DSI_TXPKT1H_BC_DT_SHIFT	0
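+/*
+ * A long-packet header is built from the fields above, e.g. (a sketch,
+ * with field semantics inferred from the names):
+ * VC4_SET_FIELD(payload_len, DSI_TXPKT1H_BC_PARAM) |
+ * VC4_SET_FIELD(MIPI_DSI_GENERIC_LONG_WRITE, DSI_TXPKT1H_BC_DT)
+ */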
+
+#define DSI0_RXPKT1H		0x0c /* AKA RX1_PKTH */
+#define DSI1_RXPKT1H		0x14
+# define DSI_RXPKT1H_CRC_ERR		BIT(31)
+# define DSI_RXPKT1H_DET_ERR		BIT(30)
+# define DSI_RXPKT1H_ECC_ERR		BIT(29)
+# define DSI_RXPKT1H_COR_ERR		BIT(28)
+# define DSI_RXPKT1H_INCOMP_PKT		BIT(25)
+# define DSI_RXPKT1H_PKT_TYPE_LONG	BIT(24)
+/* Byte count if DSI_RXPKT1H_PKT_TYPE_LONG */
+# define DSI_RXPKT1H_BC_PARAM_MASK	VC4_MASK(23, 8)
+# define DSI_RXPKT1H_BC_PARAM_SHIFT	8
+/* Short return bytes if !DSI_RXPKT1H_PKT_TYPE_LONG */
+# define DSI_RXPKT1H_SHORT_1_MASK	VC4_MASK(23, 16)
+# define DSI_RXPKT1H_SHORT_1_SHIFT	16
+# define DSI_RXPKT1H_SHORT_0_MASK	VC4_MASK(15, 8)
+# define DSI_RXPKT1H_SHORT_0_SHIFT	8
+# define DSI_RXPKT1H_DT_LP_CMD_MASK	VC4_MASK(7, 0)
+# define DSI_RXPKT1H_DT_LP_CMD_SHIFT	0
+
+#define DSI0_RXPKT2H		0x10 /* AKA RX2_PKTH */
+#define DSI1_RXPKT2H		0x18
+# define DSI_RXPKT1H_DET_ERR		BIT(30)
+# define DSI_RXPKT1H_ECC_ERR		BIT(29)
+# define DSI_RXPKT1H_COR_ERR		BIT(28)
+# define DSI_RXPKT1H_INCOMP_PKT		BIT(25)
+# define DSI_RXPKT1H_BC_PARAM_MASK	VC4_MASK(23, 8)
+# define DSI_RXPKT1H_BC_PARAM_SHIFT	8
+# define DSI_RXPKT1H_DT_MASK		VC4_MASK(7, 0)
+# define DSI_RXPKT1H_DT_SHIFT		0
+
+#define DSI0_TXPKT_CMD_FIFO	0x14 /* AKA CMD_DATAF */
+#define DSI1_TXPKT_CMD_FIFO	0x1c
+
+#define DSI0_DISP0_CTRL		0x18
+# define DSI_DISP0_PIX_CLK_DIV_MASK	VC4_MASK(21, 13)
+# define DSI_DISP0_PIX_CLK_DIV_SHIFT	13
+# define DSI_DISP0_LP_STOP_CTRL_MASK	VC4_MASK(12, 11)
+# define DSI_DISP0_LP_STOP_CTRL_SHIFT	11
+# define DSI_DISP0_LP_STOP_DISABLE	0
+# define DSI_DISP0_LP_STOP_PERLINE	1
+# define DSI_DISP0_LP_STOP_PERFRAME	2
+
+/* Transmit RGB pixels and null packets only during HACTIVE, instead
+ * of going to LP-STOP.
+ */
+# define DSI_DISP_HACTIVE_NULL		BIT(10)
+/* Transmit blanking packet only during vblank, instead of allowing LP-STOP. */
+# define DSI_DISP_VBLP_CTRL		BIT(9)
+/* Transmit blanking packet only during HFP, instead of allowing LP-STOP. */
+# define DSI_DISP_HFP_CTRL		BIT(8)
+/* Transmit blanking packet only during HBP, instead of allowing LP-STOP. */
+# define DSI_DISP_HBP_CTRL		BIT(7)
+# define DSI_DISP0_CHANNEL_MASK		VC4_MASK(6, 5)
+# define DSI_DISP0_CHANNEL_SHIFT	5
+/* Enables end events for HSYNC/VSYNC, not just start events. */
+# define DSI_DISP0_ST_END		BIT(4)
+# define DSI_DISP0_PFORMAT_MASK		VC4_MASK(3, 2)
+# define DSI_DISP0_PFORMAT_SHIFT	2
+# define DSI_PFORMAT_RGB565		0
+# define DSI_PFORMAT_RGB666_PACKED	1
+# define DSI_PFORMAT_RGB666		2
+# define DSI_PFORMAT_RGB888		3
+/* Default is VIDEO mode. */
+# define DSI_DISP0_COMMAND_MODE		BIT(1)
+# define DSI_DISP0_ENABLE		BIT(0)
+
+#define DSI0_DISP1_CTRL		0x1c
+#define DSI1_DISP1_CTRL		0x2c
+/* Format of the data written to TXPKT_PIX_FIFO. */
+# define DSI_DISP1_PFORMAT_MASK		VC4_MASK(2, 1)
+# define DSI_DISP1_PFORMAT_SHIFT	1
+# define DSI_DISP1_PFORMAT_16BIT	0
+# define DSI_DISP1_PFORMAT_24BIT	1
+# define DSI_DISP1_PFORMAT_32BIT_LE	2
+# define DSI_DISP1_PFORMAT_32BIT_BE	3
+
+/* DISP1 is always command mode. */
+# define DSI_DISP1_ENABLE		BIT(0)
+
+#define DSI0_TXPKT_PIX_FIFO		0x20 /* AKA PIX_FIFO */
+
+#define DSI0_INT_STAT		0x24
+#define DSI0_INT_EN		0x28
+# define DSI1_INT_PHY_D3_ULPS		BIT(30)
+# define DSI1_INT_PHY_D3_STOP		BIT(29)
+# define DSI1_INT_PHY_D2_ULPS		BIT(28)
+# define DSI1_INT_PHY_D2_STOP		BIT(27)
+# define DSI1_INT_PHY_D1_ULPS		BIT(26)
+# define DSI1_INT_PHY_D1_STOP		BIT(25)
+# define DSI1_INT_PHY_D0_ULPS		BIT(24)
+# define DSI1_INT_PHY_D0_STOP		BIT(23)
+# define DSI1_INT_FIFO_ERR		BIT(22)
+# define DSI1_INT_PHY_DIR_RTF		BIT(21)
+# define DSI1_INT_PHY_RXLPDT		BIT(20)
+# define DSI1_INT_PHY_RXTRIG		BIT(19)
+# define DSI1_INT_PHY_D0_LPDT		BIT(18)
+# define DSI1_INT_PHY_DIR_FTR		BIT(17)
+
+/* Signaled when the clock lane enters the given state. */
+# define DSI1_INT_PHY_CLOCK_ULPS	BIT(16)
+# define DSI1_INT_PHY_CLOCK_HS		BIT(15)
+# define DSI1_INT_PHY_CLOCK_STOP	BIT(14)
+
+/* Signaled on timeouts */
+# define DSI1_INT_PR_TO			BIT(13)
+# define DSI1_INT_TA_TO			BIT(12)
+# define DSI1_INT_LPRX_TO		BIT(11)
+# define DSI1_INT_HSTX_TO		BIT(10)
+
+/* Contention on a line when trying to drive the line low */
+# define DSI1_INT_ERR_CONT_LP1		BIT(9)
+# define DSI1_INT_ERR_CONT_LP0		BIT(8)
+
+/* Control error: incorrect line state sequence on data lane 0. */
+# define DSI1_INT_ERR_CONTROL		BIT(7)
+/* LPDT synchronization error (bits received not a multiple of 8). */
+# define DSI1_INT_ERR_SYNC_ESC		BIT(6)
+/* Signaled after receiving an error packet from the display in
+ * response to a read.
+ */
+# define DSI1_INT_RXPKT2		BIT(5)
+/* Signaled after receiving a packet.  The header and optional short
+ * response will be in RXPKT1H, and a long response will be in the
+ * RXPKT_FIFO.
+ */
+# define DSI1_INT_RXPKT1		BIT(4)
+# define DSI1_INT_TXPKT2_DONE		BIT(3)
+# define DSI1_INT_TXPKT2_END		BIT(2)
+/* Signaled after all repeats of TXPKT1 are transferred. */
+# define DSI1_INT_TXPKT1_DONE		BIT(1)
+/* Signaled after each TXPKT1 repeat is scheduled. */
+# define DSI1_INT_TXPKT1_END		BIT(0)
+
+#define DSI1_INTERRUPTS_ALWAYS_ENABLED	(DSI1_INT_ERR_SYNC_ESC | \
+					 DSI1_INT_ERR_CONTROL |	 \
+					 DSI1_INT_ERR_CONT_LP0 | \
+					 DSI1_INT_ERR_CONT_LP1 | \
+					 DSI1_INT_HSTX_TO |	 \
+					 DSI1_INT_LPRX_TO |	 \
+					 DSI1_INT_TA_TO |	 \
+					 DSI1_INT_PR_TO)
+
+#define DSI0_STAT		0x2c
+#define DSI0_HSTX_TO_CNT	0x30
+#define DSI0_LPRX_TO_CNT	0x34
+#define DSI0_TA_TO_CNT		0x38
+#define DSI0_PR_TO_CNT		0x3c
+#define DSI0_PHYC		0x40
+# define DSI1_PHYC_ESC_CLK_LPDT_MASK	VC4_MASK(25, 20)
+# define DSI1_PHYC_ESC_CLK_LPDT_SHIFT	20
+# define DSI1_PHYC_HS_CLK_CONTINUOUS	BIT(18)
+# define DSI0_PHYC_ESC_CLK_LPDT_MASK	VC4_MASK(17, 12)
+# define DSI0_PHYC_ESC_CLK_LPDT_SHIFT	12
+# define DSI1_PHYC_CLANE_ULPS		BIT(17)
+# define DSI1_PHYC_CLANE_ENABLE		BIT(16)
+# define DSI_PHYC_DLANE3_ULPS		BIT(13)
+# define DSI_PHYC_DLANE3_ENABLE		BIT(12)
+# define DSI0_PHYC_HS_CLK_CONTINUOUS	BIT(10)
+# define DSI0_PHYC_CLANE_ULPS		BIT(9)
+# define DSI_PHYC_DLANE2_ULPS		BIT(9)
+# define DSI0_PHYC_CLANE_ENABLE		BIT(8)
+# define DSI_PHYC_DLANE2_ENABLE		BIT(8)
+# define DSI_PHYC_DLANE1_ULPS		BIT(5)
+# define DSI_PHYC_DLANE1_ENABLE		BIT(4)
+# define DSI_PHYC_DLANE0_FORCE_STOP	BIT(2)
+# define DSI_PHYC_DLANE0_ULPS		BIT(1)
+# define DSI_PHYC_DLANE0_ENABLE		BIT(0)
+
+#define DSI0_HS_CLT0		0x44
+#define DSI0_HS_CLT1		0x48
+#define DSI0_HS_CLT2		0x4c
+#define DSI0_HS_DLT3		0x50
+#define DSI0_HS_DLT4		0x54
+#define DSI0_HS_DLT5		0x58
+#define DSI0_HS_DLT6		0x5c
+#define DSI0_HS_DLT7		0x60
+
+#define DSI0_PHY_AFEC0		0x64
+# define DSI0_PHY_AFEC0_DDR2CLK_EN		BIT(26)
+# define DSI0_PHY_AFEC0_DDRCLK_EN		BIT(25)
+# define DSI0_PHY_AFEC0_LATCH_ULPS		BIT(24)
+# define DSI1_PHY_AFEC0_IDR_DLANE3_MASK		VC4_MASK(31, 29)
+# define DSI1_PHY_AFEC0_IDR_DLANE3_SHIFT	29
+# define DSI1_PHY_AFEC0_IDR_DLANE2_MASK		VC4_MASK(28, 26)
+# define DSI1_PHY_AFEC0_IDR_DLANE2_SHIFT	26
+# define DSI1_PHY_AFEC0_IDR_DLANE1_MASK		VC4_MASK(27, 23)
+# define DSI1_PHY_AFEC0_IDR_DLANE1_SHIFT	23
+# define DSI1_PHY_AFEC0_IDR_DLANE0_MASK		VC4_MASK(22, 20)
+# define DSI1_PHY_AFEC0_IDR_DLANE0_SHIFT	20
+# define DSI1_PHY_AFEC0_IDR_CLANE_MASK		VC4_MASK(19, 17)
+# define DSI1_PHY_AFEC0_IDR_CLANE_SHIFT		17
+# define DSI0_PHY_AFEC0_ACTRL_DLANE1_MASK	VC4_MASK(23, 20)
+# define DSI0_PHY_AFEC0_ACTRL_DLANE1_SHIFT	20
+# define DSI0_PHY_AFEC0_ACTRL_DLANE0_MASK	VC4_MASK(19, 16)
+# define DSI0_PHY_AFEC0_ACTRL_DLANE0_SHIFT	16
+# define DSI0_PHY_AFEC0_ACTRL_CLANE_MASK	VC4_MASK(15, 12)
+# define DSI0_PHY_AFEC0_ACTRL_CLANE_SHIFT	12
+# define DSI1_PHY_AFEC0_DDR2CLK_EN		BIT(16)
+# define DSI1_PHY_AFEC0_DDRCLK_EN		BIT(15)
+# define DSI1_PHY_AFEC0_LATCH_ULPS		BIT(14)
+# define DSI1_PHY_AFEC0_RESET			BIT(13)
+# define DSI1_PHY_AFEC0_PD			BIT(12)
+# define DSI0_PHY_AFEC0_RESET			BIT(11)
+# define DSI1_PHY_AFEC0_PD_BG			BIT(11)
+# define DSI0_PHY_AFEC0_PD			BIT(10)
+# define DSI1_PHY_AFEC0_PD_DLANE3		BIT(10)
+# define DSI0_PHY_AFEC0_PD_BG			BIT(9)
+# define DSI1_PHY_AFEC0_PD_DLANE2		BIT(9)
+# define DSI0_PHY_AFEC0_PD_DLANE1		BIT(8)
+# define DSI1_PHY_AFEC0_PD_DLANE1		BIT(8)
+# define DSI_PHY_AFEC0_PTATADJ_MASK		VC4_MASK(7, 4)
+# define DSI_PHY_AFEC0_PTATADJ_SHIFT		4
+# define DSI_PHY_AFEC0_CTATADJ_MASK		VC4_MASK(3, 0)
+# define DSI_PHY_AFEC0_CTATADJ_SHIFT		0
+
+#define DSI0_PHY_AFEC1		0x68
+# define DSI0_PHY_AFEC1_IDR_DLANE1_MASK		VC4_MASK(10, 8)
+# define DSI0_PHY_AFEC1_IDR_DLANE1_SHIFT	8
+# define DSI0_PHY_AFEC1_IDR_DLANE0_MASK		VC4_MASK(6, 4)
+# define DSI0_PHY_AFEC1_IDR_DLANE0_SHIFT	4
+# define DSI0_PHY_AFEC1_IDR_CLANE_MASK		VC4_MASK(2, 0)
+# define DSI0_PHY_AFEC1_IDR_CLANE_SHIFT		0
+
+#define DSI0_TST_SEL		0x6c
+#define DSI0_TST_MON		0x70
+#define DSI0_ID			0x74
+# define DSI_ID_VALUE		0x00647369
+
+#define DSI1_CTRL		0x00
+# define DSI_CTRL_HS_CLKC_MASK		VC4_MASK(15, 14)
+# define DSI_CTRL_HS_CLKC_SHIFT		14
+# define DSI_CTRL_HS_CLKC_BYTE		0
+# define DSI_CTRL_HS_CLKC_DDR2		1
+# define DSI_CTRL_HS_CLKC_DDR		2
+
+# define DSI_CTRL_RX_LPDT_EOT_DISABLE	BIT(13)
+# define DSI_CTRL_LPDT_EOT_DISABLE	BIT(12)
+# define DSI_CTRL_HSDT_EOT_DISABLE	BIT(11)
+# define DSI_CTRL_SOFT_RESET_CFG	BIT(10)
+# define DSI_CTRL_CAL_BYTE		BIT(9)
+# define DSI_CTRL_INV_BYTE		BIT(8)
+# define DSI_CTRL_CLR_LDF		BIT(7)
+# define DSI0_CTRL_CLR_PBCF		BIT(6)
+# define DSI1_CTRL_CLR_RXF		BIT(6)
+# define DSI0_CTRL_CLR_CPBCF		BIT(5)
+# define DSI1_CTRL_CLR_PDF		BIT(5)
+# define DSI0_CTRL_CLR_PDF		BIT(4)
+# define DSI1_CTRL_CLR_CDF		BIT(4)
+# define DSI0_CTRL_CLR_CDF		BIT(3)
+# define DSI0_CTRL_CTRL2		BIT(2)
+# define DSI1_CTRL_DISABLE_DISP_CRCC	BIT(2)
+# define DSI0_CTRL_CTRL1		BIT(1)
+# define DSI1_CTRL_DISABLE_DISP_ECCC	BIT(1)
+# define DSI0_CTRL_CTRL0		BIT(0)
+# define DSI1_CTRL_EN			BIT(0)
+# define DSI0_CTRL_RESET_FIFOS		(DSI_CTRL_CLR_LDF | \
+					 DSI0_CTRL_CLR_PBCF | \
+					 DSI0_CTRL_CLR_CPBCF |	\
+					 DSI0_CTRL_CLR_PDF | \
+					 DSI0_CTRL_CLR_CDF)
+# define DSI1_CTRL_RESET_FIFOS		(DSI_CTRL_CLR_LDF | \
+					 DSI1_CTRL_CLR_RXF | \
+					 DSI1_CTRL_CLR_PDF | \
+					 DSI1_CTRL_CLR_CDF)
+
+#define DSI1_TXPKT2C		0x0c
+#define DSI1_TXPKT2H		0x10
+#define DSI1_TXPKT_PIX_FIFO	0x20
+#define DSI1_RXPKT_FIFO		0x24
+#define DSI1_DISP0_CTRL		0x28
+#define DSI1_INT_STAT		0x30
+#define DSI1_INT_EN		0x34
+/* State reporting bits.  These mostly behave like INT_STAT, where
+ * writing a 1 clears the bit.
+ */
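+/* (For example, vc4_dsi_ulps() below writes its stat_ulps mask to this
+ * register to clear stale ULPS bits before polling for the lanes to
+ * enter ULPS again.)
+ */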
+#define DSI1_STAT		0x38
+# define DSI1_STAT_PHY_D3_ULPS		BIT(31)
+# define DSI1_STAT_PHY_D3_STOP		BIT(30)
+# define DSI1_STAT_PHY_D2_ULPS		BIT(29)
+# define DSI1_STAT_PHY_D2_STOP		BIT(28)
+# define DSI1_STAT_PHY_D1_ULPS		BIT(27)
+# define DSI1_STAT_PHY_D1_STOP		BIT(26)
+# define DSI1_STAT_PHY_D0_ULPS		BIT(25)
+# define DSI1_STAT_PHY_D0_STOP		BIT(24)
+# define DSI1_STAT_FIFO_ERR		BIT(23)
+# define DSI1_STAT_PHY_RXLPDT		BIT(22)
+# define DSI1_STAT_PHY_RXTRIG		BIT(21)
+# define DSI1_STAT_PHY_D0_LPDT		BIT(20)
+/* Set when in forward direction */
+# define DSI1_STAT_PHY_DIR		BIT(19)
+# define DSI1_STAT_PHY_CLOCK_ULPS	BIT(18)
+# define DSI1_STAT_PHY_CLOCK_HS		BIT(17)
+# define DSI1_STAT_PHY_CLOCK_STOP	BIT(16)
+# define DSI1_STAT_PR_TO		BIT(15)
+# define DSI1_STAT_TA_TO		BIT(14)
+# define DSI1_STAT_LPRX_TO		BIT(13)
+# define DSI1_STAT_HSTX_TO		BIT(12)
+# define DSI1_STAT_ERR_CONT_LP1		BIT(11)
+# define DSI1_STAT_ERR_CONT_LP0		BIT(10)
+# define DSI1_STAT_ERR_CONTROL		BIT(9)
+# define DSI1_STAT_ERR_SYNC_ESC		BIT(8)
+# define DSI1_STAT_RXPKT2		BIT(7)
+# define DSI1_STAT_RXPKT1		BIT(6)
+# define DSI1_STAT_TXPKT2_BUSY		BIT(5)
+# define DSI1_STAT_TXPKT2_DONE		BIT(4)
+# define DSI1_STAT_TXPKT2_END		BIT(3)
+# define DSI1_STAT_TXPKT1_BUSY		BIT(2)
+# define DSI1_STAT_TXPKT1_DONE		BIT(1)
+# define DSI1_STAT_TXPKT1_END		BIT(0)
+
+#define DSI1_HSTX_TO_CNT	0x3c
+#define DSI1_LPRX_TO_CNT	0x40
+#define DSI1_TA_TO_CNT		0x44
+#define DSI1_PR_TO_CNT		0x48
+#define DSI1_PHYC		0x4c
+
+#define DSI1_HS_CLT0		0x50
+# define DSI_HS_CLT0_CZERO_MASK		VC4_MASK(26, 18)
+# define DSI_HS_CLT0_CZERO_SHIFT	18
+# define DSI_HS_CLT0_CPRE_MASK		VC4_MASK(17, 9)
+# define DSI_HS_CLT0_CPRE_SHIFT		9
+# define DSI_HS_CLT0_CPREP_MASK		VC4_MASK(8, 0)
+# define DSI_HS_CLT0_CPREP_SHIFT	0
+
+#define DSI1_HS_CLT1		0x54
+# define DSI_HS_CLT1_CTRAIL_MASK	VC4_MASK(17, 9)
+# define DSI_HS_CLT1_CTRAIL_SHIFT	9
+# define DSI_HS_CLT1_CPOST_MASK		VC4_MASK(8, 0)
+# define DSI_HS_CLT1_CPOST_SHIFT	0
+
+#define DSI1_HS_CLT2		0x58
+# define DSI_HS_CLT2_WUP_MASK		VC4_MASK(23, 0)
+# define DSI_HS_CLT2_WUP_SHIFT		0
+
+#define DSI1_HS_DLT3		0x5c
+# define DSI_HS_DLT3_EXIT_MASK		VC4_MASK(26, 18)
+# define DSI_HS_DLT3_EXIT_SHIFT		18
+# define DSI_HS_DLT3_ZERO_MASK		VC4_MASK(17, 9)
+# define DSI_HS_DLT3_ZERO_SHIFT		9
+# define DSI_HS_DLT3_PRE_MASK		VC4_MASK(8, 0)
+# define DSI_HS_DLT3_PRE_SHIFT		0
+
+#define DSI1_HS_DLT4		0x60
+# define DSI_HS_DLT4_ANLAT_MASK		VC4_MASK(22, 18)
+# define DSI_HS_DLT4_ANLAT_SHIFT	18
+# define DSI_HS_DLT4_TRAIL_MASK		VC4_MASK(17, 9)
+# define DSI_HS_DLT4_TRAIL_SHIFT	9
+# define DSI_HS_DLT4_LPX_MASK		VC4_MASK(8, 0)
+# define DSI_HS_DLT4_LPX_SHIFT		0
+
+#define DSI1_HS_DLT5		0x64
+# define DSI_HS_DLT5_INIT_MASK		VC4_MASK(23, 0)
+# define DSI_HS_DLT5_INIT_SHIFT		0
+
+#define DSI1_HS_DLT6		0x68
+# define DSI_HS_DLT6_TA_GET_MASK	VC4_MASK(31, 24)
+# define DSI_HS_DLT6_TA_GET_SHIFT	24
+# define DSI_HS_DLT6_TA_SURE_MASK	VC4_MASK(23, 16)
+# define DSI_HS_DLT6_TA_SURE_SHIFT	16
+# define DSI_HS_DLT6_TA_GO_MASK		VC4_MASK(15, 8)
+# define DSI_HS_DLT6_TA_GO_SHIFT	8
+# define DSI_HS_DLT6_LP_LPX_MASK	VC4_MASK(7, 0)
+# define DSI_HS_DLT6_LP_LPX_SHIFT	0
+
+#define DSI1_HS_DLT7		0x6c
+# define DSI_HS_DLT7_LP_WUP_MASK	VC4_MASK(23, 0)
+# define DSI_HS_DLT7_LP_WUP_SHIFT	0
+
+#define DSI1_PHY_AFEC0		0x70
+
+#define DSI1_PHY_AFEC1		0x74
+# define DSI1_PHY_AFEC1_ACTRL_DLANE3_MASK	VC4_MASK(19, 16)
+# define DSI1_PHY_AFEC1_ACTRL_DLANE3_SHIFT	16
+# define DSI1_PHY_AFEC1_ACTRL_DLANE2_MASK	VC4_MASK(15, 12)
+# define DSI1_PHY_AFEC1_ACTRL_DLANE2_SHIFT	12
+# define DSI1_PHY_AFEC1_ACTRL_DLANE1_MASK	VC4_MASK(11, 8)
+# define DSI1_PHY_AFEC1_ACTRL_DLANE1_SHIFT	8
+# define DSI1_PHY_AFEC1_ACTRL_DLANE0_MASK	VC4_MASK(7, 4)
+# define DSI1_PHY_AFEC1_ACTRL_DLANE0_SHIFT	4
+# define DSI1_PHY_AFEC1_ACTRL_CLANE_MASK	VC4_MASK(3, 0)
+# define DSI1_PHY_AFEC1_ACTRL_CLANE_SHIFT	0
+
+#define DSI1_TST_SEL		0x78
+#define DSI1_TST_MON		0x7c
+#define DSI1_PHY_TST1		0x80
+#define DSI1_PHY_TST2		0x84
+#define DSI1_PHY_FIFO_STAT	0x88
+/* Actually, all registers in the range that aren't otherwise claimed
+ * will return the ID.
+ */
+#define DSI1_ID			0x8c
+
+/* General DSI hardware state. */
+struct vc4_dsi {
+	struct platform_device *pdev;
+
+	struct mipi_dsi_host dsi_host;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct drm_panel *panel;
+
+	void __iomem *regs;
+
+	struct dma_chan *reg_dma_chan;
+	dma_addr_t reg_dma_paddr;
+	u32 *reg_dma_mem;
+	dma_addr_t reg_paddr;
+
+	/* Whether we're on bcm2835's DSI0 or DSI1. */
+	int port;
+
+	/* DSI channel for the panel we're connected to. */
+	u32 channel;
+	u32 lanes;
+	enum mipi_dsi_pixel_format format;
+	u32 mode_flags;
+
+	/* Input clock from CPRMAN to the digital PHY, for the DSI
+	 * escape clock.
+	 */
+	struct clk *escape_clock;
+
+	/* Input clock to the analog PHY, used to generate the DSI bit
+	 * clock.
+	 */
+	struct clk *pll_phy_clock;
+
+	/* HS Clocks generated within the DSI analog PHY. */
+	struct clk_fixed_factor phy_clocks[3];
+
+	struct clk_hw_onecell_data *clk_onecell;
+
+	/* Pixel clock output to the pixelvalve, generated from the HS
+	 * clock.
+	 */
+	struct clk *pixel_clock;
+
+	struct completion xfer_completion;
+	int xfer_result;
+};
+
+#define host_to_dsi(host) container_of(host, struct vc4_dsi, dsi_host)
+
+static inline void
+dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
+{
+	struct dma_chan *chan = dsi->reg_dma_chan;
+	struct dma_async_tx_descriptor *tx;
+	dma_cookie_t cookie;
+	int ret;
+
+	/* DSI0 should be able to write normally. */
+	if (!chan) {
+		writel(val, dsi->regs + offset);
+		return;
+	}
+
+	*dsi->reg_dma_mem = val;
+
+	tx = chan->device->device_prep_dma_memcpy(chan,
+						  dsi->reg_paddr + offset,
+						  dsi->reg_dma_paddr,
+						  4, 0);
+	if (!tx) {
+		DRM_ERROR("Failed to set up DMA register write\n");
+		return;
+	}
+
+	cookie = tx->tx_submit(tx);
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		DRM_ERROR("Failed to submit DMA: %d\n", ret);
+		return;
+	}
+	ret = dma_sync_wait(chan, cookie);
+	if (ret)
+		DRM_ERROR("Failed to wait for DMA: %d\n", ret);
+}
+
+#define DSI_READ(offset) readl(dsi->regs + (offset))
+#define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val)
+#define DSI_PORT_READ(offset) \
+	DSI_READ(dsi->port ? DSI1_##offset : DSI0_##offset)
+#define DSI_PORT_WRITE(offset, val) \
+	DSI_WRITE(dsi->port ? DSI1_##offset : DSI0_##offset, val)
+#define DSI_PORT_BIT(bit) (dsi->port ? DSI1_##bit : DSI0_##bit)
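+/* E.g. DSI_PORT_READ(PHYC) reads DSI0_PHYC or DSI1_PHYC depending on
+ * which port this controller instance is.
+ */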
+
+/* VC4 DSI encoder KMS struct */
+struct vc4_dsi_encoder {
+	struct vc4_encoder base;
+	struct vc4_dsi *dsi;
+};
+
+static inline struct vc4_dsi_encoder *
+to_vc4_dsi_encoder(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct vc4_dsi_encoder, base.base);
+}
+
+/* VC4 DSI connector KMS struct */
+struct vc4_dsi_connector {
+	struct drm_connector base;
+	struct vc4_dsi *dsi;
+};
+
+static inline struct vc4_dsi_connector *
+to_vc4_dsi_connector(struct drm_connector *connector)
+{
+	return container_of(connector, struct vc4_dsi_connector, base);
+}
+
+#define DSI_REG(reg) { reg, #reg }
+static const struct {
+	u32 reg;
+	const char *name;
+} dsi0_regs[] = {
+	DSI_REG(DSI0_CTRL),
+	DSI_REG(DSI0_STAT),
+	DSI_REG(DSI0_HSTX_TO_CNT),
+	DSI_REG(DSI0_LPRX_TO_CNT),
+	DSI_REG(DSI0_TA_TO_CNT),
+	DSI_REG(DSI0_PR_TO_CNT),
+	DSI_REG(DSI0_DISP0_CTRL),
+	DSI_REG(DSI0_DISP1_CTRL),
+	DSI_REG(DSI0_INT_STAT),
+	DSI_REG(DSI0_INT_EN),
+	DSI_REG(DSI0_PHYC),
+	DSI_REG(DSI0_HS_CLT0),
+	DSI_REG(DSI0_HS_CLT1),
+	DSI_REG(DSI0_HS_CLT2),
+	DSI_REG(DSI0_HS_DLT3),
+	DSI_REG(DSI0_HS_DLT4),
+	DSI_REG(DSI0_HS_DLT5),
+	DSI_REG(DSI0_HS_DLT6),
+	DSI_REG(DSI0_HS_DLT7),
+	DSI_REG(DSI0_PHY_AFEC0),
+	DSI_REG(DSI0_PHY_AFEC1),
+	DSI_REG(DSI0_ID),
+};
+
+static const struct {
+	u32 reg;
+	const char *name;
+} dsi1_regs[] = {
+	DSI_REG(DSI1_CTRL),
+	DSI_REG(DSI1_STAT),
+	DSI_REG(DSI1_HSTX_TO_CNT),
+	DSI_REG(DSI1_LPRX_TO_CNT),
+	DSI_REG(DSI1_TA_TO_CNT),
+	DSI_REG(DSI1_PR_TO_CNT),
+	DSI_REG(DSI1_DISP0_CTRL),
+	DSI_REG(DSI1_DISP1_CTRL),
+	DSI_REG(DSI1_INT_STAT),
+	DSI_REG(DSI1_INT_EN),
+	DSI_REG(DSI1_PHYC),
+	DSI_REG(DSI1_HS_CLT0),
+	DSI_REG(DSI1_HS_CLT1),
+	DSI_REG(DSI1_HS_CLT2),
+	DSI_REG(DSI1_HS_DLT3),
+	DSI_REG(DSI1_HS_DLT4),
+	DSI_REG(DSI1_HS_DLT5),
+	DSI_REG(DSI1_HS_DLT6),
+	DSI_REG(DSI1_HS_DLT7),
+	DSI_REG(DSI1_PHY_AFEC0),
+	DSI_REG(DSI1_PHY_AFEC1),
+	DSI_REG(DSI1_ID),
+};
+
+static void vc4_dsi_dump_regs(struct vc4_dsi *dsi)
+{
+	int i;
+
+	if (dsi->port == 0) {
+		for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) {
+			DRM_INFO("0x%04x (%s): 0x%08x\n",
+				 dsi0_regs[i].reg, dsi0_regs[i].name,
+				 DSI_READ(dsi0_regs[i].reg));
+		}
+	} else {
+		for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) {
+			DRM_INFO("0x%04x (%s): 0x%08x\n",
+				 dsi1_regs[i].reg, dsi1_regs[i].name,
+				 DSI_READ(dsi1_regs[i].reg));
+		}
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *drm = node->minor->dev;
+	struct vc4_dev *vc4 = to_vc4_dev(drm);
+	int dsi_index = (uintptr_t)node->info_ent->data;
+	struct vc4_dsi *dsi = (dsi_index == 1 ? vc4->dsi1 : NULL);
+	int i;
+
+	if (!dsi)
+		return 0;
+
+	if (dsi->port == 0) {
+		for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) {
+			seq_printf(m, "0x%04x (%s): 0x%08x\n",
+				   dsi0_regs[i].reg, dsi0_regs[i].name,
+				   DSI_READ(dsi0_regs[i].reg));
+		}
+	} else {
+		for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) {
+			seq_printf(m, "0x%04x (%s): 0x%08x\n",
+				   dsi1_regs[i].reg, dsi1_regs[i].name,
+				   DSI_READ(dsi1_regs[i].reg));
+		}
+	}
+
+	return 0;
+}
+#endif
+
+static enum drm_connector_status
+vc4_dsi_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct vc4_dsi_connector *vc4_connector =
+		to_vc4_dsi_connector(connector);
+	struct vc4_dsi *dsi = vc4_connector->dsi;
+
+	if (dsi->panel)
+		return connector_status_connected;
+	else
+		return connector_status_disconnected;
+}
+
+static void vc4_dsi_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+}
+
+static int vc4_dsi_connector_get_modes(struct drm_connector *connector)
+{
+	struct vc4_dsi_connector *vc4_connector =
+		to_vc4_dsi_connector(connector);
+	struct vc4_dsi *dsi = vc4_connector->dsi;
+
+	if (dsi->panel)
+		return drm_panel_get_modes(dsi->panel);
+
+	return 0;
+}
+
+static const struct drm_connector_funcs vc4_dsi_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = vc4_dsi_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = vc4_dsi_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs vc4_dsi_connector_helper_funcs = {
+	.get_modes = vc4_dsi_connector_get_modes,
+};
+
+static struct drm_connector *vc4_dsi_connector_init(struct drm_device *dev,
+						    struct vc4_dsi *dsi)
+{
+	struct drm_connector *connector = NULL;
+	struct vc4_dsi_connector *dsi_connector;
+	int ret = 0;
+
+	dsi_connector = devm_kzalloc(dev->dev, sizeof(*dsi_connector),
+				     GFP_KERNEL);
+	if (!dsi_connector) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	connector = &dsi_connector->base;
+
+	dsi_connector->dsi = dsi;
+
+	drm_connector_init(dev, connector, &vc4_dsi_connector_funcs,
+			   DRM_MODE_CONNECTOR_DSI);
+	drm_connector_helper_add(connector, &vc4_dsi_connector_helper_funcs);
+
+	connector->polled = 0;
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	drm_mode_connector_attach_encoder(connector, dsi->encoder);
+
+	return connector;
+
+fail:
+	if (connector)
+		vc4_dsi_connector_destroy(connector);
+
+	return ERR_PTR(ret);
+}
+
+static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = {
+	.destroy = vc4_dsi_encoder_destroy,
+};
+
+static void vc4_dsi_latch_ulps(struct vc4_dsi *dsi, bool latch)
+{
+	u32 afec0 = DSI_PORT_READ(PHY_AFEC0);
+
+	if (latch)
+		afec0 |= DSI_PORT_BIT(PHY_AFEC0_LATCH_ULPS);
+	else
+		afec0 &= ~DSI_PORT_BIT(PHY_AFEC0_LATCH_ULPS);
+
+	DSI_PORT_WRITE(PHY_AFEC0, afec0);
+}
+
+/* Enters or exits Ultra Low Power State. */
+static void vc4_dsi_ulps(struct vc4_dsi *dsi, bool ulps)
+{
+	bool continuous = dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS;
+	u32 phyc_ulps = ((continuous ? DSI_PORT_BIT(PHYC_CLANE_ULPS) : 0) |
+			 DSI_PHYC_DLANE0_ULPS |
+			 (dsi->lanes > 1 ? DSI_PHYC_DLANE1_ULPS : 0) |
+			 (dsi->lanes > 2 ? DSI_PHYC_DLANE2_ULPS : 0) |
+			 (dsi->lanes > 3 ? DSI_PHYC_DLANE3_ULPS : 0));
+	u32 stat_ulps = ((continuous ? DSI1_STAT_PHY_CLOCK_ULPS : 0) |
+			 DSI1_STAT_PHY_D0_ULPS |
+			 (dsi->lanes > 1 ? DSI1_STAT_PHY_D1_ULPS : 0) |
+			 (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_ULPS : 0) |
+			 (dsi->lanes > 3 ? DSI1_STAT_PHY_D3_ULPS : 0));
+	u32 stat_stop = ((continuous ? DSI1_STAT_PHY_CLOCK_STOP : 0) |
+			 DSI1_STAT_PHY_D0_STOP |
+			 (dsi->lanes > 1 ? DSI1_STAT_PHY_D1_STOP : 0) |
+			 (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_STOP : 0) |
+			 (dsi->lanes > 3 ? DSI1_STAT_PHY_D3_STOP : 0));
+	int ret;
+
+	DSI_PORT_WRITE(STAT, stat_ulps);
+	DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) | phyc_ulps);
+	ret = wait_for((DSI_PORT_READ(STAT) & stat_ulps) == stat_ulps, 200);
+	if (ret) {
+		dev_warn(&dsi->pdev->dev,
+			 "Timeout waiting for DSI ULPS entry: STAT 0x%08x",
+			 DSI_PORT_READ(STAT));
+		DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) & ~phyc_ulps);
+		vc4_dsi_latch_ulps(dsi, false);
+		return;
+	}
+
+	/* The DSI module can't be disabled while the module is
+	 * generating ULPS state.  So, to be able to disable the
+	 * module, we have the AFE latch the ULPS state and continue
+	 * on to having the module enter STOP.
+	 */
+	vc4_dsi_latch_ulps(dsi, ulps);
+
+	DSI_PORT_WRITE(STAT, stat_stop);
+	DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) & ~phyc_ulps);
+	ret = wait_for((DSI_PORT_READ(STAT) & stat_stop) == stat_stop, 200);
+	if (ret) {
+		dev_warn(&dsi->pdev->dev,
+			 "Timeout waiting for DSI STOP entry: STAT 0x%08x",
+			 DSI_PORT_READ(STAT));
+		DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) & ~phyc_ulps);
+		return;
+	}
+}
+
+static u32
+dsi_hs_timing(u32 ui_ns, u32 ns, u32 ui)
+{
+	/* The HS timings have to be rounded up to a multiple of 8
+	 * because we're using the byte clock.
+	 */
+	return roundup(ui + DIV_ROUND_UP(ns, ui_ns), 8);
+}
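+
+/* A rough worked example (illustrative values): a 500 MHz HS clock
+ * gives ui_ns = 1 in vc4_dsi_encoder_enable() below, so the 38 ns
+ * clock-lane prepare time becomes dsi_hs_timing(1, 38, 0) =
+ * roundup(DIV_ROUND_UP(38, 1) + 0, 8) = 40 byte-clock units.
+ */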
+
+/* ESC always runs at 100 MHz. */
+#define ESC_TIME_NS 10
+
+static u32
+dsi_esc_timing(u32 ns)
+{
+	return DIV_ROUND_UP(ns, ESC_TIME_NS);
+}
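+
+/* For instance, the 60 ns minimum LP state duration used for lpx in
+ * vc4_dsi_encoder_enable() works out to dsi_esc_timing(60) =
+ * DIV_ROUND_UP(60, 10) = 6 escape clock cycles.
+ */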
+
+static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+	struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
+	struct vc4_dsi *dsi = vc4_encoder->dsi;
+	struct device *dev = &dsi->pdev->dev;
+
+	drm_panel_disable(dsi->panel);
+
+	vc4_dsi_ulps(dsi, true);
+
+	drm_panel_unprepare(dsi->panel);
+
+	clk_disable_unprepare(dsi->pll_phy_clock);
+	clk_disable_unprepare(dsi->escape_clock);
+	clk_disable_unprepare(dsi->pixel_clock);
+
+	pm_runtime_put(dev);
+}
+
+static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+	struct drm_display_mode *mode = &encoder->crtc->mode;
+	struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
+	struct vc4_dsi *dsi = vc4_encoder->dsi;
+	struct device *dev = &dsi->pdev->dev;
+	u32 format = 0, divider = 0;
+	bool debug_dump_regs = false;
+	unsigned long hs_clock;
+	u32 ui_ns;
+	/* Minimum LP state duration in escape clock cycles. */
+	u32 lpx = dsi_esc_timing(60);
+	unsigned long pixel_clock_hz = mode->clock * 1000;
+	unsigned long dsip_clock;
+	unsigned long phy_clock;
+	int ret;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		DRM_ERROR("Failed to enable runtime PM on DSI%d\n", dsi->port);
+		return;
+	}
+
+	ret = drm_panel_prepare(dsi->panel);
+	if (ret) {
+		DRM_ERROR("Panel failed to prepare\n");
+		return;
+	}
+
+	if (debug_dump_regs) {
+		DRM_INFO("DSI regs before:\n");
+		vc4_dsi_dump_regs(dsi);
+	}
+
+	switch (dsi->format) {
+	case MIPI_DSI_FMT_RGB888:
+		format = DSI_PFORMAT_RGB888;
+		divider = 24 / dsi->lanes;
+		break;
+	case MIPI_DSI_FMT_RGB666:
+		format = DSI_PFORMAT_RGB666;
+		divider = 24 / dsi->lanes;
+		break;
+	case MIPI_DSI_FMT_RGB666_PACKED:
+		format = DSI_PFORMAT_RGB666_PACKED;
+		divider = 18 / dsi->lanes;
+		break;
+	case MIPI_DSI_FMT_RGB565:
+		format = DSI_PFORMAT_RGB565;
+		divider = 16 / dsi->lanes;
+		break;
+	}
+
+	phy_clock = pixel_clock_hz * divider;
+	ret = clk_set_rate(dsi->pll_phy_clock, phy_clock);
+	if (ret) {
+		dev_err(&dsi->pdev->dev,
+			"Failed to set phy clock to %ld: %d\n", phy_clock, ret);
+	}
+
+	/* Reset the DSI and all its fifos. */
+	DSI_PORT_WRITE(CTRL,
+		       DSI_CTRL_SOFT_RESET_CFG |
+		       DSI_PORT_BIT(CTRL_RESET_FIFOS));
+
+	DSI_PORT_WRITE(CTRL,
+		       DSI_CTRL_HSDT_EOT_DISABLE |
+		       DSI_CTRL_RX_LPDT_EOT_DISABLE);
+
+	/* Clear all stat bits so we see what has happened during enable. */
+	DSI_PORT_WRITE(STAT, DSI_PORT_READ(STAT));
+
+	/* Set AFE CTR00/CTR1 to release powerdown of analog. */
+	if (dsi->port == 0) {
+		u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) |
+			     VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ));
+
+		if (dsi->lanes < 2)
+			afec0 |= DSI0_PHY_AFEC0_PD_DLANE1;
+
+		if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO))
+			afec0 |= DSI0_PHY_AFEC0_RESET;
+
+		DSI_PORT_WRITE(PHY_AFEC0, afec0);
+
+		DSI_PORT_WRITE(PHY_AFEC1,
+			       VC4_SET_FIELD(6,  DSI0_PHY_AFEC1_IDR_DLANE1) |
+			       VC4_SET_FIELD(6,  DSI0_PHY_AFEC1_IDR_DLANE0) |
+			       VC4_SET_FIELD(6,  DSI0_PHY_AFEC1_IDR_CLANE));
+	} else {
+		u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) |
+			     VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ) |
+			     VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_CLANE) |
+			     VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE0) |
+			     VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE1) |
+			     VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE2) |
+			     VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE3));
+
+		if (dsi->lanes < 4)
+			afec0 |= DSI1_PHY_AFEC0_PD_DLANE3;
+		if (dsi->lanes < 3)
+			afec0 |= DSI1_PHY_AFEC0_PD_DLANE2;
+		if (dsi->lanes < 2)
+			afec0 |= DSI1_PHY_AFEC0_PD_DLANE1;
+
+		afec0 |= DSI1_PHY_AFEC0_RESET;
+
+		DSI_PORT_WRITE(PHY_AFEC0, afec0);
+
+		DSI_PORT_WRITE(PHY_AFEC1, 0);
+
+		/* AFEC reset hold time */
+		mdelay(1);
+	}
+
+	ret = clk_prepare_enable(dsi->escape_clock);
+	if (ret) {
+		DRM_ERROR("Failed to turn on DSI escape clock: %d\n", ret);
+		return;
+	}
+
+	ret = clk_prepare_enable(dsi->pll_phy_clock);
+	if (ret) {
+		DRM_ERROR("Failed to turn on DSI PLL: %d\n", ret);
+		return;
+	}
+
+	hs_clock = clk_get_rate(dsi->pll_phy_clock);
+
+	/* Yes, we set the DSI0P/DSI1P pixel clock to the byte rate,
+	 * not the pixel clock rate.  DSIxP takes from the APHY's byte,
+	 * DDR2, or DDR4 clock (we use byte) and feed into the PV at
+	 * that rate.  Separately, a value derived from PIX_CLK_DIV
+	 * and HS_CLKC is fed into the PV to divide down to the actual
+	 * pixel clock for pushing pixels into DSI.
+	 */
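+	/* E.g. RGB888 on two lanes gives divider = 12, so a 70 MHz pixel
+	 * clock implies phy_clock = 840 MHz and a 105 MHz byte-rate DSIP
+	 * clock (illustrative numbers).
+	 */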
+	dsip_clock = phy_clock / 8;
+	ret = clk_set_rate(dsi->pixel_clock, dsip_clock);
+	if (ret) {
+		dev_err(dev, "Failed to set pixel clock to %ldHz: %d\n",
+			dsip_clock, ret);
+	}
+
+	ret = clk_prepare_enable(dsi->pixel_clock);
+	if (ret) {
+		DRM_ERROR("Failed to turn on DSI pixel clock: %d\n", ret);
+		return;
+	}
+
+	/* How many ns one DSI unit interval is.  Note that the clock
+	 * is DDR, so there's an extra divide by 2.
+	 */
+	ui_ns = DIV_ROUND_UP(500000000, hs_clock);
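+	/* E.g. a 250 MHz HS clock (500 Mbps per lane, DDR) gives
+	 * ui_ns = DIV_ROUND_UP(500000000, 250000000) = 2.
+	 */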
+
+	DSI_PORT_WRITE(HS_CLT0,
+		       VC4_SET_FIELD(dsi_hs_timing(ui_ns, 262, 0),
+				     DSI_HS_CLT0_CZERO) |
+		       VC4_SET_FIELD(dsi_hs_timing(ui_ns, 0, 8),
+				     DSI_HS_CLT0_CPRE) |
+		       VC4_SET_FIELD(dsi_hs_timing(ui_ns, 38, 0),
+				     DSI_HS_CLT0_CPREP));
+
+	DSI_PORT_WRITE(HS_CLT1,
+		       VC4_SET_FIELD(dsi_hs_timing(ui_ns, 60, 0),
+				     DSI_HS_CLT1_CTRAIL) |
+		       VC4_SET_FIELD(dsi_hs_timing(ui_ns, 60, 52),
+				     DSI_HS_CLT1_CPOST));
+
+	DSI_PORT_WRITE(HS_CLT2,
+		       VC4_SET_FIELD(dsi_hs_timing(ui_ns, 1000000, 0),
+				     DSI_HS_CLT2_WUP));
+
+	DSI_PORT_WRITE(HS_DLT3,
+		       VC4_SET_FIELD(dsi_hs_timing(ui_ns, 100, 0),
+				     DSI_HS_DLT3_EXIT) |
+		       VC4_SET_FIELD(dsi_hs_timing(ui_ns, 105, 6),
+				     DSI_HS_DLT3_ZERO) |
+		       VC4_SET_FIELD(dsi_hs_timing(ui_ns, 40, 4),
+				     DSI_HS_DLT3_PRE));
+
+	DSI_PORT_WRITE(HS_DLT4,
+		       VC4_SET_FIELD(dsi_hs_timing(ui_ns, lpx * ESC_TIME_NS, 0),
+				     DSI_HS_DLT4_LPX) |
+		       VC4_SET_FIELD(max(dsi_hs_timing(ui_ns, 0, 8),
+					 dsi_hs_timing(ui_ns, 60, 4)),
+				     DSI_HS_DLT4_TRAIL) |
+		       VC4_SET_FIELD(0, DSI_HS_DLT4_ANLAT));
+
+	DSI_PORT_WRITE(HS_DLT5, VC4_SET_FIELD(dsi_hs_timing(ui_ns, 1000, 5000),
+					      DSI_HS_DLT5_INIT));
+
+	DSI_PORT_WRITE(HS_DLT6,
+		       VC4_SET_FIELD(lpx * 5, DSI_HS_DLT6_TA_GET) |
+		       VC4_SET_FIELD(lpx, DSI_HS_DLT6_TA_SURE) |
+		       VC4_SET_FIELD(lpx * 4, DSI_HS_DLT6_TA_GO) |
+		       VC4_SET_FIELD(lpx, DSI_HS_DLT6_LP_LPX));
+
+	DSI_PORT_WRITE(HS_DLT7,
+		       VC4_SET_FIELD(dsi_esc_timing(1000000),
+				     DSI_HS_DLT7_LP_WUP));
+
+	DSI_PORT_WRITE(PHYC,
+		       DSI_PHYC_DLANE0_ENABLE |
+		       (dsi->lanes >= 2 ? DSI_PHYC_DLANE1_ENABLE : 0) |
+		       (dsi->lanes >= 3 ? DSI_PHYC_DLANE2_ENABLE : 0) |
+		       (dsi->lanes >= 4 ? DSI_PHYC_DLANE3_ENABLE : 0) |
+		       DSI_PORT_BIT(PHYC_CLANE_ENABLE) |
+		       ((dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ?
+			0 : DSI_PORT_BIT(PHYC_HS_CLK_CONTINUOUS)) |
+		       (dsi->port == 0 ?
+			VC4_SET_FIELD(lpx - 1, DSI0_PHYC_ESC_CLK_LPDT) :
+			VC4_SET_FIELD(lpx - 1, DSI1_PHYC_ESC_CLK_LPDT)));
+
+	DSI_PORT_WRITE(CTRL,
+		       DSI_PORT_READ(CTRL) |
+		       DSI_CTRL_CAL_BYTE);
+
+	/* HS timeout in HS clock cycles: disabled. */
+	DSI_PORT_WRITE(HSTX_TO_CNT, 0);
+	/* LP receive timeout in HS clocks. */
+	DSI_PORT_WRITE(LPRX_TO_CNT, 0xffffff);
+	/* Bus turnaround timeout */
+	DSI_PORT_WRITE(TA_TO_CNT, 100000);
+	/* Display reset sequence timeout */
+	DSI_PORT_WRITE(PR_TO_CNT, 100000);
+
+	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+		DSI_PORT_WRITE(DISP0_CTRL,
+			       VC4_SET_FIELD(divider, DSI_DISP0_PIX_CLK_DIV) |
+			       VC4_SET_FIELD(format, DSI_DISP0_PFORMAT) |
+			       VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME,
+					     DSI_DISP0_LP_STOP_CTRL) |
+			       DSI_DISP0_ST_END |
+			       DSI_DISP0_ENABLE);
+	} else {
+		DSI_PORT_WRITE(DISP0_CTRL,
+			       DSI_DISP0_COMMAND_MODE |
+			       DSI_DISP0_ENABLE);
+	}
+
+	/* Set up DISP1 for transferring long command payloads through
+	 * the pixfifo.
+	 */
+	DSI_PORT_WRITE(DISP1_CTRL,
+		       VC4_SET_FIELD(DSI_DISP1_PFORMAT_32BIT_LE,
+				     DSI_DISP1_PFORMAT) |
+		       DSI_DISP1_ENABLE);
+
+	/* Ungate the block. */
+	if (dsi->port == 0)
+		DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI0_CTRL_CTRL0);
+	else
+		DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN);
+
+	/* Bring AFE out of reset. */
+	if (dsi->port == 0) {
+	} else {
+		DSI_PORT_WRITE(PHY_AFEC0,
+			       DSI_PORT_READ(PHY_AFEC0) &
+			       ~DSI1_PHY_AFEC0_RESET);
+	}
+
+	vc4_dsi_ulps(dsi, false);
+
+	if (debug_dump_regs) {
+		DRM_INFO("DSI regs after:\n");
+		vc4_dsi_dump_regs(dsi);
+	}
+
+	ret = drm_panel_enable(dsi->panel);
+	if (ret) {
+		DRM_ERROR("Panel failed to enable\n");
+		drm_panel_unprepare(dsi->panel);
+		return;
+	}
+}
+
+static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
+				     const struct mipi_dsi_msg *msg)
+{
+	struct vc4_dsi *dsi = host_to_dsi(host);
+	struct mipi_dsi_packet packet;
+	u32 pkth = 0, pktc = 0;
+	int i, ret;
+	bool is_long = mipi_dsi_packet_format_is_long(msg->type);
+	u32 cmd_fifo_len = 0, pix_fifo_len = 0;
+
+	mipi_dsi_create_packet(&packet, msg);
+
+	pkth |= VC4_SET_FIELD(packet.header[0], DSI_TXPKT1H_BC_DT);
+	pkth |= VC4_SET_FIELD(packet.header[1] |
+			      (packet.header[2] << 8),
+			      DSI_TXPKT1H_BC_PARAM);
+	if (is_long) {
+		/* Divide data across the various FIFOs we have available.
+		 * The command FIFO takes byte-oriented data, but is of
+		 * limited size. The pixel FIFO (never actually used for
+		 * pixel data in reality) is word oriented, and substantially
+		 * larger. So, we use the pixel FIFO for most of the data,
+		 * sending the residual bytes in the command FIFO at the start.
+		 *
+		 * With this arrangement, the command FIFO will never get full.
+		 */
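+		/* For example, assuming DSI_PIX_FIFO_WIDTH is 4 bytes
+		 * (matching the 4-byte packing below): a 70-byte payload
+		 * splits into cmd_fifo_len = 70 % 4 = 2 residual bytes and
+		 * pix_fifo_len = (70 - 2) / 4 = 17 words.
+		 */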
+		if (packet.payload_length <= 16) {
+			cmd_fifo_len = packet.payload_length;
+			pix_fifo_len = 0;
+		} else {
+			cmd_fifo_len = (packet.payload_length %
+					DSI_PIX_FIFO_WIDTH);
+			pix_fifo_len = ((packet.payload_length - cmd_fifo_len) /
+					DSI_PIX_FIFO_WIDTH);
+		}
+
+		WARN_ON_ONCE(pix_fifo_len >= DSI_PIX_FIFO_DEPTH);
+
+		pkth |= VC4_SET_FIELD(cmd_fifo_len, DSI_TXPKT1H_BC_CMDFIFO);
+	}
+
+	if (msg->rx_len) {
+		pktc |= VC4_SET_FIELD(DSI_TXPKT1C_CMD_CTRL_RX,
+				      DSI_TXPKT1C_CMD_CTRL);
+	} else {
+		pktc |= VC4_SET_FIELD(DSI_TXPKT1C_CMD_CTRL_TX,
+				      DSI_TXPKT1C_CMD_CTRL);
+	}
+
+	for (i = 0; i < cmd_fifo_len; i++)
+		DSI_PORT_WRITE(TXPKT_CMD_FIFO, packet.payload[i]);
+	for (i = 0; i < pix_fifo_len; i++) {
+		const u8 *pix = packet.payload + cmd_fifo_len + i * 4;
+
+		DSI_PORT_WRITE(TXPKT_PIX_FIFO,
+			       pix[0] |
+			       pix[1] << 8 |
+			       pix[2] << 16 |
+			       pix[3] << 24);
+	}
+
+	if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+		pktc |= DSI_TXPKT1C_CMD_MODE_LP;
+	if (is_long)
+		pktc |= DSI_TXPKT1C_CMD_TYPE_LONG;
+
+	/* Send one copy of the packet.  Larger repeats are used for pixel
+	 * data in command mode.
+	 */
+	pktc |= VC4_SET_FIELD(1, DSI_TXPKT1C_CMD_REPEAT);
+
+	pktc |= DSI_TXPKT1C_CMD_EN;
+	if (pix_fifo_len) {
+		pktc |= VC4_SET_FIELD(DSI_TXPKT1C_DISPLAY_NO_SECONDARY,
+				      DSI_TXPKT1C_DISPLAY_NO);
+	} else {
+		pktc |= VC4_SET_FIELD(DSI_TXPKT1C_DISPLAY_NO_SHORT,
+				      DSI_TXPKT1C_DISPLAY_NO);
+	}
+
+	/* Enable the appropriate interrupt for the transfer completion. */
+	dsi->xfer_result = 0;
+	reinit_completion(&dsi->xfer_completion);
+	DSI_PORT_WRITE(INT_STAT, DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF);
+	if (msg->rx_len) {
+		DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
+					DSI1_INT_PHY_DIR_RTF));
+	} else {
+		DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
+					DSI1_INT_TXPKT1_DONE));
+	}
+
+	/* Send the packet. */
+	DSI_PORT_WRITE(TXPKT1H, pkth);
+	DSI_PORT_WRITE(TXPKT1C, pktc);
+
+	if (!wait_for_completion_timeout(&dsi->xfer_completion,
+					 msecs_to_jiffies(1000))) {
+		dev_err(&dsi->pdev->dev, "transfer interrupt wait timeout\n");
+		dev_err(&dsi->pdev->dev, "instat: 0x%08x\n",
+			DSI_PORT_READ(INT_STAT));
+		ret = -ETIMEDOUT;
+	} else {
+		ret = dsi->xfer_result;
+	}
+
+	DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED);
+
+	if (ret)
+		goto reset_fifo_and_return;
+
+	if (ret == 0 && msg->rx_len) {
+		u32 rxpkt1h = DSI_PORT_READ(RXPKT1H);
+		u8 *msg_rx = msg->rx_buf;
+
+		if (rxpkt1h & DSI_RXPKT1H_PKT_TYPE_LONG) {
+			u32 rxlen = VC4_GET_FIELD(rxpkt1h,
+						  DSI_RXPKT1H_BC_PARAM);
+
+			if (rxlen != msg->rx_len) {
+				DRM_ERROR("DSI returned %db, expecting %db\n",
+					  rxlen, (int)msg->rx_len);
+				ret = -ENXIO;
+				goto reset_fifo_and_return;
+			}
+
+			for (i = 0; i < msg->rx_len; i++)
+				msg_rx[i] = DSI_READ(DSI1_RXPKT_FIFO);
+		} else {
+			/* FINISHME: Handle AWER */
+
+			msg_rx[0] = VC4_GET_FIELD(rxpkt1h,
+						  DSI_RXPKT1H_SHORT_0);
+			if (msg->rx_len > 1) {
+				msg_rx[1] = VC4_GET_FIELD(rxpkt1h,
+							  DSI_RXPKT1H_SHORT_1);
+			}
+		}
+	}
+
+	return ret;
+
+reset_fifo_and_return:
+	DRM_ERROR("DSI transfer failed, resetting: %d\n", ret);
+
+	DSI_PORT_WRITE(TXPKT1C, DSI_PORT_READ(TXPKT1C) & ~DSI_TXPKT1C_CMD_EN);
+	udelay(1);
+	DSI_PORT_WRITE(CTRL,
+		       DSI_PORT_READ(CTRL) |
+		       DSI_PORT_BIT(CTRL_RESET_FIFOS));
+
+	DSI_PORT_WRITE(TXPKT1C, 0);
+	DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED);
+	return ret;
+}
+
+static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
+			       struct mipi_dsi_device *device)
+{
+	struct vc4_dsi *dsi = host_to_dsi(host);
+	int ret = 0;
+
+	dsi->lanes = device->lanes;
+	dsi->channel = device->channel;
+	dsi->format = device->format;
+	dsi->mode_flags = device->mode_flags;
+
+	if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+		dev_err(&dsi->pdev->dev,
+			"Only VIDEO mode panels supported currently.\n");
+		return 0;
+	}
+
+	dsi->panel = of_drm_find_panel(device->dev.of_node);
+	if (!dsi->panel)
+		return 0;
+
+	ret = drm_panel_attach(dsi->panel, dsi->connector);
+	if (ret != 0)
+		return ret;
+
+	drm_helper_hpd_irq_event(dsi->connector->dev);
+
+	return 0;
+}
+
+static int vc4_dsi_host_detach(struct mipi_dsi_host *host,
+			       struct mipi_dsi_device *device)
+{
+	struct vc4_dsi *dsi = host_to_dsi(host);
+
+	if (dsi->panel) {
+		int ret = drm_panel_detach(dsi->panel);
+
+		if (ret)
+			return ret;
+
+		dsi->panel = NULL;
+
+		drm_helper_hpd_irq_event(dsi->connector->dev);
+	}
+
+	return 0;
+}
+
+static const struct mipi_dsi_host_ops vc4_dsi_host_ops = {
+	.attach = vc4_dsi_host_attach,
+	.detach = vc4_dsi_host_detach,
+	.transfer = vc4_dsi_host_transfer,
+};
+
+static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
+	.disable = vc4_dsi_encoder_disable,
+	.enable = vc4_dsi_encoder_enable,
+};
+
+static const struct of_device_id vc4_dsi_dt_match[] = {
+	{ .compatible = "brcm,bcm2835-dsi1", .data = (void *)(uintptr_t)1 },
+	{}
+};
+
+static void dsi_handle_error(struct vc4_dsi *dsi,
+			     irqreturn_t *ret, u32 stat, u32 bit,
+			     const char *type)
+{
+	if (!(stat & bit))
+		return;
+
+	DRM_ERROR("DSI%d: %s error\n", dsi->port, type);
+	*ret = IRQ_HANDLED;
+}
+
+static irqreturn_t vc4_dsi_irq_handler(int irq, void *data)
+{
+	struct vc4_dsi *dsi = data;
+	u32 stat = DSI_PORT_READ(INT_STAT);
+	irqreturn_t ret = IRQ_NONE;
+
+	DSI_PORT_WRITE(INT_STAT, stat);
+
+	dsi_handle_error(dsi, &ret, stat,
+			 DSI1_INT_ERR_SYNC_ESC, "LPDT sync");
+	dsi_handle_error(dsi, &ret, stat,
+			 DSI1_INT_ERR_CONTROL, "data lane 0 sequence");
+	dsi_handle_error(dsi, &ret, stat,
+			 DSI1_INT_ERR_CONT_LP0, "LP0 contention");
+	dsi_handle_error(dsi, &ret, stat,
+			 DSI1_INT_ERR_CONT_LP1, "LP1 contention");
+	dsi_handle_error(dsi, &ret, stat,
+			 DSI1_INT_HSTX_TO, "HSTX timeout");
+	dsi_handle_error(dsi, &ret, stat,
+			 DSI1_INT_LPRX_TO, "LPRX timeout");
+	dsi_handle_error(dsi, &ret, stat,
+			 DSI1_INT_TA_TO, "turnaround timeout");
+	dsi_handle_error(dsi, &ret, stat,
+			 DSI1_INT_PR_TO, "peripheral reset timeout");
+
+	if (stat & (DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF)) {
+		complete(&dsi->xfer_completion);
+		ret = IRQ_HANDLED;
+	} else if (stat & DSI1_INT_HSTX_TO) {
+		complete(&dsi->xfer_completion);
+		dsi->xfer_result = -ETIMEDOUT;
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+/*
+ * Exposes clocks generated by the analog PHY that are consumed by
+ * CPRMAN (clk-bcm2835.c).
+ */
+static int
+vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
+{
+	struct device *dev = &dsi->pdev->dev;
+	const char *parent_name = __clk_get_name(dsi->pll_phy_clock);
+	static const struct {
+		const char *dsi0_name, *dsi1_name;
+		int div;
+	} phy_clocks[] = {
+		{ "dsi0_byte", "dsi1_byte", 8 },
+		{ "dsi0_ddr2", "dsi1_ddr2", 4 },
+		{ "dsi0_ddr", "dsi1_ddr", 2 },
+	};
+	int i;
+
+	dsi->clk_onecell = devm_kzalloc(dev,
+					sizeof(*dsi->clk_onecell) +
+					ARRAY_SIZE(phy_clocks) *
+					sizeof(struct clk_hw *),
+					GFP_KERNEL);
+	if (!dsi->clk_onecell)
+		return -ENOMEM;
+	dsi->clk_onecell->num = ARRAY_SIZE(phy_clocks);
+
+	for (i = 0; i < ARRAY_SIZE(phy_clocks); i++) {
+		struct clk_fixed_factor *fix = &dsi->phy_clocks[i];
+		struct clk_init_data init;
+		int ret;
+
+		/* We just use core fixed factor clock ops for the PHY
+		 * clocks.  The clocks are actually gated by the
+		 * PHY_AFEC0_DDRCLK_EN bits, which we should be
+		 * setting if we use the DDR/DDR2 clocks.  However,
+		 * vc4_dsi_encoder_enable() is setting up both AFEC0,
+		 * setting both our parent DSI PLL's rate and this
+		 * clock's rate, so it knows if DDR/DDR2 are going to
+		 * be used and could enable the gates itself.
+		 */
+		fix->mult = 1;
+		fix->div = phy_clocks[i].div;
+		fix->hw.init = &init;
+
+		memset(&init, 0, sizeof(init));
+		init.parent_names = &parent_name;
+		init.num_parents = 1;
+		if (dsi->port == 1)
+			init.name = phy_clocks[i].dsi1_name;
+		else
+			init.name = phy_clocks[i].dsi0_name;
+		init.ops = &clk_fixed_factor_ops;
+
+		ret = devm_clk_hw_register(dev, &fix->hw);
+		if (ret)
+			return ret;
+
+		dsi->clk_onecell->hws[i] = &fix->hw;
+	}
+
+	return of_clk_add_hw_provider(dev->of_node,
+				      of_clk_hw_onecell_get,
+				      dsi->clk_onecell);
+}
+
+static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct vc4_dev *vc4 = to_vc4_dev(drm);
+	struct vc4_dsi *dsi;
+	struct vc4_dsi_encoder *vc4_dsi_encoder;
+	const struct of_device_id *match;
+	dma_cap_mask_t dma_mask;
+	int ret;
+
+	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+	if (!dsi)
+		return -ENOMEM;
+
+	match = of_match_device(vc4_dsi_dt_match, dev);
+	if (!match)
+		return -ENODEV;
+
+	dsi->port = (uintptr_t)match->data;
+
+	vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder),
+				       GFP_KERNEL);
+	if (!vc4_dsi_encoder)
+		return -ENOMEM;
+	vc4_dsi_encoder->base.type = VC4_ENCODER_TYPE_DSI1;
+	vc4_dsi_encoder->dsi = dsi;
+	dsi->encoder = &vc4_dsi_encoder->base.base;
+
+	dsi->pdev = pdev;
+	dsi->regs = vc4_ioremap_regs(pdev, 0);
+	if (IS_ERR(dsi->regs))
+		return PTR_ERR(dsi->regs);
+
+	if (DSI_PORT_READ(ID) != DSI_ID_VALUE) {
+		dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
+			DSI_PORT_READ(ID), DSI_ID_VALUE);
+		return -ENODEV;
+	}
+
+	/* DSI1 has a broken AXI slave that doesn't respond to writes
+	 * from the ARM.  It does handle writes from the DMA engine,
+	 * so set up a channel for talking to it.
+	 */
+	if (dsi->port == 1) {
+		dsi->reg_dma_mem = dma_alloc_coherent(dev, 4,
+						      &dsi->reg_dma_paddr,
+						      GFP_KERNEL);
+		if (!dsi->reg_dma_mem) {
+			DRM_ERROR("Failed to get DMA memory\n");
+			return -ENOMEM;
+		}
+
+		dma_cap_zero(dma_mask);
+		dma_cap_set(DMA_MEMCPY, dma_mask);
+		dsi->reg_dma_chan = dma_request_chan_by_mask(&dma_mask);
+		if (IS_ERR(dsi->reg_dma_chan)) {
+			ret = PTR_ERR(dsi->reg_dma_chan);
+			if (ret != -EPROBE_DEFER)
+				DRM_ERROR("Failed to get DMA channel: %d\n",
+					  ret);
+			return ret;
+		}
+
+		/* Get the physical address of the device's registers.  The
+		 * struct resource for the regs gives us the bus address
+		 * instead.
+		 */
+		dsi->reg_paddr = be32_to_cpup(of_get_address(dev->of_node,
+							     0, NULL, NULL));
+	}
+
+	init_completion(&dsi->xfer_completion);
+	/* At startup enable error-reporting interrupts and nothing else. */
+	DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED);
+	/* Clear any existing interrupt state. */
+	DSI_PORT_WRITE(INT_STAT, DSI_PORT_READ(INT_STAT));
+
+	ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
+			       vc4_dsi_irq_handler, 0, "vc4 dsi", dsi);
+	if (ret) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get interrupt: %d\n", ret);
+		return ret;
+	}
+
+	dsi->escape_clock = devm_clk_get(dev, "escape");
+	if (IS_ERR(dsi->escape_clock)) {
+		ret = PTR_ERR(dsi->escape_clock);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get escape clock: %d\n", ret);
+		return ret;
+	}
+
+	dsi->pll_phy_clock = devm_clk_get(dev, "phy");
+	if (IS_ERR(dsi->pll_phy_clock)) {
+		ret = PTR_ERR(dsi->pll_phy_clock);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get phy clock: %d\n", ret);
+		return ret;
+	}
+
+	dsi->pixel_clock = devm_clk_get(dev, "pixel");
+	if (IS_ERR(dsi->pixel_clock)) {
+		ret = PTR_ERR(dsi->pixel_clock);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get pixel clock: %d\n", ret);
+		return ret;
+	}
+
+	/* The esc clock rate is supposed to always be 100 MHz. */
+	ret = clk_set_rate(dsi->escape_clock, 100 * 1000000);
+	if (ret) {
+		dev_err(dev, "Failed to set esc clock: %d\n", ret);
+		return ret;
+	}
+
+	ret = vc4_dsi_init_phy_clocks(dsi);
+	if (ret)
+		return ret;
+
+	if (dsi->port == 1)
+		vc4->dsi1 = dsi;
+
+	drm_encoder_init(drm, dsi->encoder, &vc4_dsi_encoder_funcs,
+			 DRM_MODE_ENCODER_DSI, NULL);
+	drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
+
+	dsi->connector = vc4_dsi_connector_init(drm, dsi);
+	if (IS_ERR(dsi->connector)) {
+		ret = PTR_ERR(dsi->connector);
+		goto err_destroy_encoder;
+	}
+
+	dsi->dsi_host.ops = &vc4_dsi_host_ops;
+	dsi->dsi_host.dev = dev;
+
+	mipi_dsi_host_register(&dsi->dsi_host);
+
+	dev_set_drvdata(dev, dsi);
+
+	pm_runtime_enable(dev);
+
+	return 0;
+
+err_destroy_encoder:
+	vc4_dsi_encoder_destroy(dsi->encoder);
+
+	return ret;
+}
+
+static void vc4_dsi_unbind(struct device *dev, struct device *master,
+			   void *data)
+{
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct vc4_dev *vc4 = to_vc4_dev(drm);
+	struct vc4_dsi *dsi = dev_get_drvdata(dev);
+
+	pm_runtime_disable(dev);
+
+	vc4_dsi_connector_destroy(dsi->connector);
+	vc4_dsi_encoder_destroy(dsi->encoder);
+
+	mipi_dsi_host_unregister(&dsi->dsi_host);
+
+	clk_disable_unprepare(dsi->pll_phy_clock);
+	clk_disable_unprepare(dsi->escape_clock);
+
+	if (dsi->port == 1)
+		vc4->dsi1 = NULL;
+}
+
+static const struct component_ops vc4_dsi_ops = {
+	.bind   = vc4_dsi_bind,
+	.unbind = vc4_dsi_unbind,
+};
+
+static int vc4_dsi_dev_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &vc4_dsi_ops);
+}
+
+static int vc4_dsi_dev_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &vc4_dsi_ops);
+	return 0;
+}
+
+struct platform_driver vc4_dsi_driver = {
+	.probe = vc4_dsi_dev_probe,
+	.remove = vc4_dsi_dev_remove,
+	.driver = {
+		.name = "vc4_dsi",
+		.of_match_table = vc4_dsi_dt_match,
+	},
+};
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 6fbab1c82cb1..f7f7677f6d8d 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -141,8 +141,7 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
 	int ret, i;
 	u32 __iomem *dst_kernel;
 
-	ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS, 1,
-				 0);
+	ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
 	if (ret) {
 		DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
 			  ret);
@@ -170,6 +169,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
 	struct vc4_dev *vc4 = drm->dev_private;
 	struct vc4_hvs *hvs = NULL;
 	int ret;
+	u32 dispctrl;
 
 	hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
 	if (!hvs)
@@ -211,6 +211,19 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
 		return ret;
 
 	vc4->hvs = hvs;
+
+	dispctrl = HVS_READ(SCALER_DISPCTRL);
+
+	dispctrl |= SCALER_DISPCTRL_ENABLE;
+
+	/* Set DSP3 (PV1) to use HVS channel 2, which would otherwise
+	 * be unused.
+	 */
+	dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
+
+	HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index be8dd8262f27..ad7925a9e0ea 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -231,7 +231,6 @@ int vc4_kms_load(struct drm_device *dev)
 	drm_mode_config_reset(dev);
 
 	vc4->fbdev = drm_fbdev_cma_init(dev, 32,
-					dev->mode_config.num_crtc,
 					dev->mode_config.num_connector);
 	if (IS_ERR(vc4->fbdev))
 		vc4->fbdev = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 110d1518f5d5..c1f06897136b 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -514,9 +514,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	if (lbm_size) {
 		if (!vc4_state->lbm.allocated) {
 			spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
-			ret = drm_mm_insert_node(&vc4->hvs->lbm_mm,
-						 &vc4_state->lbm,
-						 lbm_size, 32, 0);
+			ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
+							 &vc4_state->lbm,
+							 lbm_size, 32, 0, 0);
 			spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
 		} else {
 			WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 39f6886b2410..385405a2df05 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -190,6 +190,8 @@
 # define PV_VCONTROL_ODD_DELAY_SHIFT		6
 # define PV_VCONTROL_ODD_FIRST			BIT(5)
 # define PV_VCONTROL_INTERLACE			BIT(4)
+# define PV_VCONTROL_DSI			BIT(3)
+# define PV_VCONTROL_COMMAND			BIT(2)
 # define PV_VCONTROL_CONTINUOUS			BIT(1)
 # define PV_VCONTROL_VIDEN			BIT(0)
 
@@ -244,6 +246,9 @@
 # define SCALER_DISPCTRL_ENABLE			BIT(31)
 # define SCALER_DISPCTRL_DSP2EISLUR		BIT(15)
 # define SCALER_DISPCTRL_DSP1EISLUR		BIT(14)
+# define SCALER_DISPCTRL_DSP3_MUX_MASK		VC4_MASK(19, 18)
+# define SCALER_DISPCTRL_DSP3_MUX_SHIFT		18
+
 /* Enables Display 0 short line and underrun contribution to
  * SCALER_DISPSTAT_IRQDISP0.  Note that short frame contributions are
  * always enabled.
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index a04ef1c992d9..4217d66a5cc6 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
 	if (mem->type == VIA_MEM_AGP)
 		retval = drm_mm_insert_node(&dev_priv->agp_mm,
 					    &item->mm_node,
-					    tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
+					    tmpSize);
 	else
 		retval = drm_mm_insert_node(&dev_priv->vram_mm,
 					    &item->mm_node,
-					    tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
+					    tmpSize);
 	if (retval)
 		goto fail_alloc;
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 24f99fc9d8a4..163a67db8cf1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -387,7 +387,6 @@ int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev)
 	drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper,
 			      &virtio_gpu_fb_helper_funcs);
 	ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
-				 vgdev->num_scanouts,
 				 VIRTIO_GPUFB_CONN_LIMIT);
 	if (ret) {
 		kfree(vgfbdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index fae75394b5d0..30f989a0cafc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -166,10 +166,14 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 	INIT_WORK(&vgdev->config_changed_work,
 		  virtio_gpu_config_changed_work_func);
 
+#ifdef __LITTLE_ENDIAN
 	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
 		vgdev->has_virgl_3d = true;
 	DRM_INFO("virgl 3d acceleration %s\n",
-		 vgdev->has_virgl_3d ? "enabled" : "not available");
+		 vgdev->has_virgl_3d ? "enabled" : "not supported by host");
+#else
+	DRM_INFO("virgl 3d acceleration not supported by guest\n");
+#endif
 
 	ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs,
 					    callbacks, names);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index aa04fb0159a7..77cb7c627e09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -673,16 +673,10 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
  
 	memset(info->node, 0, sizeof(*info->node));
 	spin_lock_bh(&man->lock);
-	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
-					 0, 0,
-					 DRM_MM_SEARCH_DEFAULT,
-					 DRM_MM_CREATE_DEFAULT);
+	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 	if (ret) {
 		vmw_cmdbuf_man_process(man);
-		ret = drm_mm_insert_node_generic(&man->mm, info->node,
-						 info->page_size, 0, 0,
-						 DRM_MM_SEARCH_DEFAULT,
-						 DRM_MM_CREATE_DEFAULT);
+		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 	}
 
 	spin_unlock_bh(&man->lock);
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index 13081fed902d..5c6944a1e72c 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -141,7 +141,7 @@ static int zx_drm_bind(struct device *dev)
 	drm_mode_config_reset(drm);
 	drm_kms_helper_poll_init(drm);
 
-	priv->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+	priv->fbdev = drm_fbdev_cma_init(drm, 32,
 					 drm->mode_config.num_connector);
 	if (IS_ERR(priv->fbdev)) {
 		ret = PTR_ERR(priv->fbdev);
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h
index 3629b2734db6..fbdfc8d7f3c7 100644
--- a/include/drm/bridge/mhl.h
+++ b/include/drm/bridge/mhl.h
@@ -15,6 +15,8 @@
 #ifndef __MHL_H__
 #define __MHL_H__
 
+#include <linux/types.h>
+
 /* Device Capabilities Registers */
 enum {
 	MHL_DCAP_DEV_STATE,
@@ -288,4 +290,87 @@ enum {
 /* Unsupported/unrecognized key code */
 #define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE	0x01
 
+enum mhl_burst_id {
+	MHL_BURST_ID_3D_VIC = 0x10,
+	MHL_BURST_ID_3D_DTD = 0x11,
+	MHL_BURST_ID_HEV_VIC = 0x20,
+	MHL_BURST_ID_HEV_DTDA = 0x21,
+	MHL_BURST_ID_HEV_DTDB = 0x22,
+	MHL_BURST_ID_VC_ASSIGN = 0x38,
+	MHL_BURST_ID_VC_CONFIRM = 0x39,
+	MHL_BURST_ID_AUD_DELAY = 0x40,
+	MHL_BURST_ID_ADT_BURSTID = 0x41,
+	MHL_BURST_ID_BIST_SETUP = 0x51,
+	MHL_BURST_ID_BIST_RETURN_STAT = 0x52,
+	MHL_BURST_ID_EMSC_SUPPORT = 0x61,
+	MHL_BURST_ID_HID_PAYLOAD = 0x62,
+	MHL_BURST_ID_BLK_RCV_BUFFER_INFO = 0x63,
+	MHL_BURST_ID_BITS_PER_PIXEL_FMT = 0x64,
+};
+
+struct mhl_burst_blk_rcv_buffer_info {
+	__be16 id;
+	__le16 size;
+} __packed;
+
+struct mhl3_burst_header {
+	__be16 id;
+	u8 checksum;
+	u8 total_entries;
+	u8 sequence_index;
+} __packed;
+
+struct mhl_burst_bits_per_pixel_fmt {
+	struct mhl3_burst_header hdr;
+	u8 num_entries;
+	struct {
+		u8 stream_id;
+		u8 pixel_format;
+	} __packed desc[0];
+} __packed;
+
+struct mhl_burst_emsc_support {
+	struct mhl3_burst_header hdr;
+	u8 num_entries;
+	__be16 burst_id[0];
+} __packed;
+
+struct mhl_burst_audio_descr {
+	struct mhl3_burst_header hdr;
+	u8 flags;
+	u8 short_desc[9];
+} __packed;
+
+/*
+ * MHL3 infoframe related definitions
+ */
+
+#define MHL3_IEEE_OUI		0x7ca61d
+#define MHL3_INFOFRAME_SIZE	15
+
+enum mhl3_video_format {
+	MHL3_VIDEO_FORMAT_NONE,
+	MHL3_VIDEO_FORMAT_3D,
+	MHL3_VIDEO_FORMAT_MULTI_VIEW,
+	MHL3_VIDEO_FORMAT_DUAL_3D
+};
+
+enum mhl3_3d_format_type {
+	MHL3_3D_FORMAT_TYPE_FS, /* frame sequential */
+	MHL3_3D_FORMAT_TYPE_TB, /* top-bottom */
+	MHL3_3D_FORMAT_TYPE_LR, /* left-right */
+	MHL3_3D_FORMAT_TYPE_FS_TB, /* frame sequential, top-bottom */
+	MHL3_3D_FORMAT_TYPE_FS_LR, /* frame sequential, left-right */
+	MHL3_3D_FORMAT_TYPE_TB_LR /* top-bottom, left-right */
+};
+
+struct mhl3_infoframe {
+	unsigned char version;
+	enum mhl3_video_format video_format;
+	enum mhl3_3d_format_type format_type;
+	bool sep_audio;
+	int hev_format;
+	int av_delay;
+};
+
 #endif /* __MHL_H__ */
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index d9c2f680f5ae..bce4a532836d 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -25,6 +25,8 @@
 
 #include <linux/ctype.h>
 
+uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision);
+
 void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
 				uint degamma_lut_size,
 				bool has_ctm,
@@ -33,29 +35,4 @@ void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
 int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
 				 int gamma_size);
 
-/**
- * drm_color_lut_extract - clamp and round LUT entries
- * @user_input: input value
- * @bit_precision: number of bits the hw LUT supports
- *
- * Extract a degamma/gamma LUT value provided by user (in the form of
- * &drm_color_lut entries) and round it to the precision supported by the
- * hardware.
- */
-static inline uint32_t drm_color_lut_extract(uint32_t user_input,
-					     uint32_t bit_precision)
-{
-	uint32_t val = user_input;
-	uint32_t max = 0xffff >> (16 - bit_precision);
-
-	/* Round only if we're not using full precision. */
-	if (bit_precision < 16) {
-		val += 1UL << (16 - bit_precision - 1);
-		val >>= 16 - bit_precision;
-	}
-
-	return clamp_val(val, 0, max);
-}
-
-
 #endif
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 732e85652d1e..5699f42195fe 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -102,6 +102,17 @@ struct drm_driver {
 	 *
 	 */
 	void (*unload) (struct drm_device *);
+
+	/**
+	 * @release:
+	 *
+	 * Optional callback for destroying device data after the final
+	 * reference is released, i.e. the device is being destroyed. Drivers
+	 * using this callback are responsible for calling drm_dev_fini()
+	 * to finalize the device and then freeing the struct themselves.
+	 */
+	void (*release) (struct drm_device *);
+
 	int (*set_busid)(struct drm_device *dev, struct drm_master *master);
 
 	/**
@@ -437,6 +448,8 @@ extern unsigned int drm_debug;
 int drm_dev_init(struct drm_device *dev,
 		 struct drm_driver *driver,
 		 struct device *parent);
+void drm_dev_fini(struct drm_device *dev);
+
 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
 				 struct device *parent);
 int drm_dev_register(struct drm_device *dev, unsigned long flags);
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 8dd6e5585e51..a5ecc0a58260 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -16,11 +16,10 @@ struct drm_plane;
 struct drm_plane_state;
 
 struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
-	unsigned int preferred_bpp, unsigned int num_crtc,
-	unsigned int max_conn_count, const struct drm_framebuffer_funcs *funcs);
+	unsigned int preferred_bpp, unsigned int max_conn_count,
+	const struct drm_framebuffer_funcs *funcs);
 struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
-	unsigned int preferred_bpp, unsigned int num_crtc,
-	unsigned int max_conn_count);
+	unsigned int preferred_bpp, unsigned int max_conn_count);
 void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
 
 void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index e62e1cf22678..6f5acebb266a 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -236,8 +236,7 @@ struct drm_fb_helper {
 void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
 			   const struct drm_fb_helper_funcs *funcs);
 int drm_fb_helper_init(struct drm_device *dev,
-		       struct drm_fb_helper *helper, int crtc_count,
-		       int max_conn);
+		       struct drm_fb_helper *helper, int max_conn);
 void drm_fb_helper_fini(struct drm_fb_helper *helper);
 int drm_fb_helper_blank(int blank, struct fb_info *info);
 int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
@@ -308,7 +307,7 @@ static inline void drm_fb_helper_prepare(struct drm_device *dev,
 }
 
 static inline int drm_fb_helper_init(struct drm_device *dev,
-		       struct drm_fb_helper *helper, int crtc_count,
+		       struct drm_fb_helper *helper,
 		       int max_conn)
 {
 	return 0;
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 3bddca8fd2b5..d81b0ba9921f 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -53,19 +53,62 @@
 #define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
 #endif
 
-enum drm_mm_search_flags {
-	DRM_MM_SEARCH_DEFAULT =		0,
-	DRM_MM_SEARCH_BEST =		1 << 0,
-	DRM_MM_SEARCH_BELOW =		1 << 1,
-};
+/**
+ * enum drm_mm_insert_mode - control search and allocation behaviour
+ *
+ * The &struct drm_mm range manager supports finding a suitable hole using
+ * a number of search trees. These trees are organised by size, by address
+ * and in most recent eviction order. This allows the user to find either the
+ * smallest hole to reuse, the lowest or highest address to reuse, or simply
+ * reuse the most recent eviction that fits. When allocating the &drm_mm_node
+ * from within the hole, the &drm_mm_insert_mode also dictates whether to
+ * allocate the lowest matching address or the highest.
+ */
+enum drm_mm_insert_mode {
+	/**
+	 * @DRM_MM_INSERT_BEST:
+	 *
+	 * Search for the smallest hole (within the search range) that fits
+	 * the desired node.
+	 *
+	 * Allocates the node from the bottom of the found hole.
+	 */
+	DRM_MM_INSERT_BEST = 0,
 
-enum drm_mm_allocator_flags {
-	DRM_MM_CREATE_DEFAULT =		0,
-	DRM_MM_CREATE_TOP =		1 << 0,
-};
+	/**
+	 * @DRM_MM_INSERT_LOW:
+	 *
+	 * Search for the lowest hole (address closest to 0, within the search
+	 * range) that fits the desired node.
+	 *
+	 * Allocates the node from the bottom of the found hole.
+	 */
+	DRM_MM_INSERT_LOW,
 
-#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
-#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
+	/**
+	 * @DRM_MM_INSERT_HIGH:
+	 *
+	 * Search for the highest hole (address closest to U64_MAX, within the
+	 * search range) that fits the desired node.
+	 *
+	 * Allocates the node from the *top* of the found hole. The specified
+	 * alignment for the node is applied to the base of the node
+	 * (&drm_mm_node.start).
+	 */
+	DRM_MM_INSERT_HIGH,
+
+	/**
+	 * @DRM_MM_INSERT_EVICT:
+	 *
+	 * Search for the most recently evicted hole (within the search range)
+	 * that fits the desired node. This is appropriate for use immediately
+	 * after performing an eviction scan (see drm_mm_scan_init()) and
+	 * removing the selected nodes to form a hole.
+	 *
+	 * Allocates the node from the bottom of the found hole.
+	 */
+	DRM_MM_INSERT_EVICT,
+};
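+
+/*
+ * For example, a driver that wants a node placed as high as possible
+ * within a 4 GiB range might do (sketch only, not from a real driver):
+ *
+ *	err = drm_mm_insert_node_in_range(&mm, &node, size, align,
+ *					  0, 0, SZ_4G,
+ *					  DRM_MM_INSERT_HIGH);
+ */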
 
 /**
  * struct drm_mm_node - allocated block in the DRM allocator
@@ -84,14 +127,16 @@ struct drm_mm_node {
 	/** @size: Size of the allocated block. */
 	u64 size;
 	/* private: */
+	struct drm_mm *mm;
 	struct list_head node_list;
 	struct list_head hole_stack;
 	struct rb_node rb;
-	unsigned hole_follows : 1;
-	unsigned allocated : 1;
-	bool scanned_block : 1;
+	struct rb_node rb_hole_size;
+	struct rb_node rb_hole_addr;
 	u64 __subtree_last;
-	struct drm_mm *mm;
+	u64 hole_size;
+	bool allocated : 1;
+	bool scanned_block : 1;
 #ifdef CONFIG_DRM_DEBUG_MM
 	depot_stack_handle_t stack;
 #endif
@@ -127,6 +172,8 @@ struct drm_mm {
 	struct drm_mm_node head_node;
 	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
 	struct rb_root interval_tree;
+	struct rb_root holes_size;
+	struct rb_root holes_addr;
 
 	unsigned long scan_active;
 };
@@ -155,7 +202,7 @@ struct drm_mm_scan {
 	u64 hit_end;
 
 	unsigned long color;
-	unsigned int flags;
+	enum drm_mm_insert_mode mode;
 };
 
 /**
@@ -208,7 +255,7 @@ static inline bool drm_mm_initialized(const struct drm_mm *mm)
  */
 static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
 {
-	return node->hole_follows;
+	return node->hole_size;
 }
 
 static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
@@ -291,17 +338,9 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
 #define drm_mm_for_each_node_safe(entry, next, mm) \
 	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
 
-#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
-	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
-	     &entry->hole_stack != &(mm)->hole_stack ? \
-	     hole_start = drm_mm_hole_node_start(entry), \
-	     hole_end = drm_mm_hole_node_end(entry), \
-	     1 : 0; \
-	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
-
 /**
  * drm_mm_for_each_hole - iterator to walk over all holes
- * @entry: &drm_mm_node used internally to track progress
+ * @pos: &drm_mm_node used internally to track progress
  * @mm: &drm_mm allocator to walk
  * @hole_start: ulong variable to assign the hole start to on each iteration
  * @hole_end: ulong variable to assign the hole end to on each iteration
@@ -314,57 +353,28 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
  * Implementation Note:
  * We need to inline list_for_each_entry in order to be able to set hole_start
  * and hole_end on each iteration while keeping the macro sane.
- *
- * The __drm_mm_for_each_hole version is similar, but with added support for
- * going backwards.
  */
-#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
-	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
+#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
+	for (pos = list_first_entry(&(mm)->hole_stack, \
+				    typeof(*pos), hole_stack); \
+	     &pos->hole_stack != &(mm)->hole_stack ? \
+	     hole_start = drm_mm_hole_node_start(pos), \
+	     hole_end = hole_start + pos->hole_size, \
+	     1 : 0; \
+	     pos = list_next_entry(pos, hole_stack))
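
A short sketch of the rewritten iterator in use, assuming an initialised
allocator reachable through mm; the pr_info() report is illustrative only:

    struct drm_mm_node *pos;
    u64 hole_start, hole_end;

    /* Walk the hole_stack list and report every free span. */
    drm_mm_for_each_hole(pos, mm, hole_start, hole_end)
            pr_info("hole [%llx, %llx): %llu bytes free\n",
                    hole_start, hole_end, hole_end - hole_start);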
 
 /*
  * Basic range manager support (drm_mm.c)
  */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
-int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
-					struct drm_mm_node *node,
-					u64 size,
-					u64 alignment,
-					unsigned long color,
-					u64 start,
-					u64 end,
-					enum drm_mm_search_flags sflags,
-					enum drm_mm_allocator_flags aflags);
-
-/**
- * drm_mm_insert_node_in_range - ranged search for space and insert @node
- * @mm: drm_mm to allocate from
- * @node: preallocate node to insert
- * @size: size of the allocation
- * @alignment: alignment of the allocation
- * @start: start of the allowed range for this node
- * @end: end of the allowed range for this node
- * @flags: flags to fine-tune the allocation
- *
- * This is a simplified version of drm_mm_insert_node_in_range_generic() with
- * @color set to 0.
- *
- * The preallocated node must be cleared to 0.
- *
- * Returns:
- * 0 on success, -ENOSPC if there's no suitable hole.
- */
-static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
-					      struct drm_mm_node *node,
-					      u64 size,
-					      u64 alignment,
-					      u64 start,
-					      u64 end,
-					      enum drm_mm_search_flags flags)
-{
-	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
-						   0, start, end, flags,
-						   DRM_MM_CREATE_DEFAULT);
-}
+int drm_mm_insert_node_in_range(struct drm_mm *mm,
+				struct drm_mm_node *node,
+				u64 size,
+				u64 alignment,
+				unsigned long color,
+				u64 start,
+				u64 end,
+				enum drm_mm_insert_mode mode);
 
 /**
  * drm_mm_insert_node_generic - search for space and insert @node
@@ -373,8 +383,7 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
  * @size: size of the allocation
  * @alignment: alignment of the allocation
  * @color: opaque tag value to use for this node
- * @sflags: flags to fine-tune the allocation search
- * @aflags: flags to fine-tune the allocation behavior
+ * @mode: fine-tune the allocation search and placement
  *
- * This is a simplified version of drm_mm_insert_node_in_range_generic() with no
+ * This is a simplified version of drm_mm_insert_node_in_range() with no
  * range restrictions applied.
@@ -388,13 +397,11 @@ static inline int
 drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
 			   u64 size, u64 alignment,
 			   unsigned long color,
-			   enum drm_mm_search_flags sflags,
-			   enum drm_mm_allocator_flags aflags)
+			   enum drm_mm_insert_mode mode)
 {
-	return drm_mm_insert_node_in_range_generic(mm, node,
-						   size, alignment, 0,
-						   0, U64_MAX,
-						   sflags, aflags);
+	return drm_mm_insert_node_in_range(mm, node,
+					   size, alignment, color,
+					   0, U64_MAX, mode);
 }
 
 /**
@@ -402,8 +409,6 @@ drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
  * @mm: drm_mm to allocate from
  * @node: preallocate node to insert
  * @size: size of the allocation
- * @alignment: alignment of the allocation
- * @flags: flags to fine-tune the allocation
  *
- * This is a simplified version of drm_mm_insert_node_generic() with @color set
- * to 0.
+ * This is a simplified version of drm_mm_insert_node_generic() with @alignment
+ * and @color set to 0 and @mode set to DRM_MM_INSERT_BEST.
@@ -415,13 +420,9 @@ drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
  */
 static inline int drm_mm_insert_node(struct drm_mm *mm,
 				     struct drm_mm_node *node,
-				     u64 size,
-				     u64 alignment,
-				     enum drm_mm_search_flags flags)
+				     u64 size)
 {
-	return drm_mm_insert_node_generic(mm, node,
-					  size, alignment, 0,
-					  flags, DRM_MM_CREATE_DEFAULT);
+	return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
 }
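
The simplest path then reads as below; a sketch assuming an initialised mm,
with drm_mm_remove_node() declared just after this helper:

    struct drm_mm_node node = {};   /* must start zeroed */
    int err;

    err = drm_mm_insert_node(mm, &node, SZ_8K); /* best fit, any address */
    if (!err)
            drm_mm_remove_node(&node);          /* hand the space back */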
 
 void drm_mm_remove_node(struct drm_mm_node *node);
@@ -468,7 +469,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
 				 struct drm_mm *mm,
 				 u64 size, u64 alignment, unsigned long color,
 				 u64 start, u64 end,
-				 unsigned int flags);
+				 enum drm_mm_insert_mode mode);
 
 /**
  * drm_mm_scan_init - initialize lru scanning
@@ -477,7 +478,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
  * @size: size of the allocation
  * @alignment: alignment of the allocation
  * @color: opaque tag value to use for the allocation
- * @flags: flags to specify how the allocation will be performed afterwards
+ * @mode: fine-tune the allocation search and placement
  *
  * This is a simplified version of drm_mm_scan_init_with_range() with no range
  * restrictions applied.
@@ -494,12 +495,11 @@ static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
 				    u64 size,
 				    u64 alignment,
 				    unsigned long color,
-				    unsigned int flags)
+				    enum drm_mm_insert_mode mode)
 {
 	drm_mm_scan_init_with_range(scan, mm,
 				    size, alignment, color,
-				    0, U64_MAX,
-				    flags);
+				    0, U64_MAX, mode);
 }
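
A condensed sketch of the scan flow the new @mode threads through; the
my_obj structure, its embedded node and the lru_list are hypothetical
driver details:

    struct drm_mm_scan scan;
    struct my_obj *obj;     /* hypothetical: driver object embedding a node */

    drm_mm_scan_init(&scan, mm, SZ_64K, 0, 0, DRM_MM_INSERT_EVICT);
    list_for_each_entry(obj, &lru_list, lru_link)
            if (drm_mm_scan_add_block(&scan, &obj->node))
                    break;  /* a large enough hole can now be formed */

Callers then filter the candidates with drm_mm_scan_remove_block(), evict
the survivors, and insert the new node with DRM_MM_INSERT_EVICT so the
freshly created hole is found first.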
 
 bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h
index 0098a522d9f4..ef8e2a8ad0af 100644
--- a/include/video/exynos5433_decon.h
+++ b/include/video/exynos5433_decon.h
@@ -89,6 +89,8 @@
 #define VIDCON0_ENVID_F			(1 << 0)
 
 /* VIDOUTCON0 */
+#define VIDOUT_INTERLACE_FIELD_F	(1 << 29)
+#define VIDOUT_INTERLACE_EN_F		(1 << 28)
 #define VIDOUT_LCD_ON			(1 << 24)
 #define VIDOUT_IF_F_MASK		(0x3 << 20)
 #define VIDOUT_RGB_IF			(0x0 << 20)